From c825e797a54804901ef40b2dd1c40e05c8adf98a Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 17 Oct 2023 20:03:16 +0000 Subject: [PATCH 1/2] chore: Update gapic-generator-python to v1.11.8 PiperOrigin-RevId: 574178735 Source-Link: https://github.com/googleapis/googleapis/commit/7307199008ee2d57a4337066de29f9cd8c444bc6 Source-Link: https://github.com/googleapis/googleapis-gen/commit/ce3af21b7c559a87c2befc076be0e3aeda3a26f0 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2UzYWYyMWI3YzU1OWE4N2MyYmVmYzA3NmJlMGUzYWVkYTNhMjZmMCJ9 --- owl-bot-staging/v1/.coveragerc | 13 + owl-bot-staging/v1/.flake8 | 33 + owl-bot-staging/v1/MANIFEST.in | 2 + owl-bot-staging/v1/README.rst | 49 + owl-bot-staging/v1/docs/_static/custom.css | 3 + owl-bot-staging/v1/docs/automl_v1/auto_ml.rst | 10 + .../v1/docs/automl_v1/prediction_service.rst | 6 + .../v1/docs/automl_v1/services.rst | 7 + owl-bot-staging/v1/docs/automl_v1/types.rst | 6 + owl-bot-staging/v1/docs/conf.py | 376 + owl-bot-staging/v1/docs/index.rst | 7 + .../v1/google/cloud/automl/__init__.py | 195 + .../v1/google/cloud/automl/gapic_version.py | 16 + .../v1/google/cloud/automl/py.typed | 2 + .../v1/google/cloud/automl_v1/__init__.py | 196 + .../cloud/automl_v1/gapic_metadata.json | 347 + .../google/cloud/automl_v1/gapic_version.py | 16 + .../v1/google/cloud/automl_v1/py.typed | 2 + .../cloud/automl_v1/services/__init__.py | 15 + .../automl_v1/services/auto_ml/__init__.py | 22 + .../services/auto_ml/async_client.py | 2507 +++ .../automl_v1/services/auto_ml/client.py | 2675 +++ .../automl_v1/services/auto_ml/pagers.py | 384 + .../services/auto_ml/transports/__init__.py | 38 + .../services/auto_ml/transports/base.py | 462 + .../services/auto_ml/transports/grpc.py | 796 + .../auto_ml/transports/grpc_asyncio.py | 795 + .../services/auto_ml/transports/rest.py | 2366 +++ .../services/prediction_service/__init__.py | 22 + .../prediction_service/async_client.py | 656 + .../services/prediction_service/client.py | 858 + .../prediction_service/transports/__init__.py | 38 + .../prediction_service/transports/base.py | 169 + .../prediction_service/transports/grpc.py | 367 + .../transports/grpc_asyncio.py | 366 + .../prediction_service/transports/rest.py | 484 + .../google/cloud/automl_v1/types/__init__.py | 220 + .../automl_v1/types/annotation_payload.py | 126 + .../cloud/automl_v1/types/annotation_spec.py | 61 + .../cloud/automl_v1/types/classification.py | 310 + .../cloud/automl_v1/types/data_items.py | 337 + .../google/cloud/automl_v1/types/dataset.py | 181 + .../google/cloud/automl_v1/types/detection.py | 165 + .../google/cloud/automl_v1/types/geometry.py | 75 + .../v1/google/cloud/automl_v1/types/image.py | 318 + .../v1/google/cloud/automl_v1/types/io.py | 1572 ++ .../v1/google/cloud/automl_v1/types/model.py | 201 + .../cloud/automl_v1/types/model_evaluation.py | 167 + .../cloud/automl_v1/types/operations.py | 330 + .../automl_v1/types/prediction_service.py | 302 + .../google/cloud/automl_v1/types/service.py | 621 + .../v1/google/cloud/automl_v1/types/text.py | 104 + .../cloud/automl_v1/types/text_extraction.py | 125 + .../cloud/automl_v1/types/text_segment.py | 63 + .../cloud/automl_v1/types/text_sentiment.py | 132 + .../cloud/automl_v1/types/translation.py | 125 + owl-bot-staging/v1/mypy.ini | 3 + owl-bot-staging/v1/noxfile.py | 184 + ..._generated_auto_ml_create_dataset_async.py | 61 + ...1_generated_auto_ml_create_dataset_sync.py | 61 + ...v1_generated_auto_ml_create_model_async.py | 56 + ..._v1_generated_auto_ml_create_model_sync.py | 56 + 
..._generated_auto_ml_delete_dataset_async.py | 56 + ...1_generated_auto_ml_delete_dataset_sync.py | 56 + ...v1_generated_auto_ml_delete_model_async.py | 56 + ..._v1_generated_auto_ml_delete_model_sync.py | 56 + ...v1_generated_auto_ml_deploy_model_async.py | 56 + ..._v1_generated_auto_ml_deploy_model_sync.py | 56 + ..._v1_generated_auto_ml_export_data_async.py | 60 + ...l_v1_generated_auto_ml_export_data_sync.py | 60 + ...v1_generated_auto_ml_export_model_async.py | 60 + ..._v1_generated_auto_ml_export_model_sync.py | 60 + ...rated_auto_ml_get_annotation_spec_async.py | 52 + ...erated_auto_ml_get_annotation_spec_sync.py | 52 + ..._v1_generated_auto_ml_get_dataset_async.py | 52 + ...l_v1_generated_auto_ml_get_dataset_sync.py | 52 + ...ml_v1_generated_auto_ml_get_model_async.py | 52 + ...ated_auto_ml_get_model_evaluation_async.py | 52 + ...rated_auto_ml_get_model_evaluation_sync.py | 52 + ...oml_v1_generated_auto_ml_get_model_sync.py | 52 + ..._v1_generated_auto_ml_import_data_async.py | 60 + ...l_v1_generated_auto_ml_import_data_sync.py | 60 + ...1_generated_auto_ml_list_datasets_async.py | 53 + ...v1_generated_auto_ml_list_datasets_sync.py | 53 + ...ed_auto_ml_list_model_evaluations_async.py | 54 + ...ted_auto_ml_list_model_evaluations_sync.py | 54 + ..._v1_generated_auto_ml_list_models_async.py | 53 + ...l_v1_generated_auto_ml_list_models_sync.py | 53 + ..._generated_auto_ml_undeploy_model_async.py | 56 + ...1_generated_auto_ml_undeploy_model_sync.py | 56 + ..._generated_auto_ml_update_dataset_async.py | 56 + ...1_generated_auto_ml_update_dataset_sync.py | 56 + ...v1_generated_auto_ml_update_model_async.py | 51 + ..._v1_generated_auto_ml_update_model_sync.py | 51 + ..._prediction_service_batch_predict_async.py | 64 + ...d_prediction_service_batch_predict_sync.py | 64 + ...erated_prediction_service_predict_async.py | 56 + ...nerated_prediction_service_predict_sync.py | 56 + ...ippet_metadata_google.cloud.automl.v1.json | 3339 ++++ .../v1/scripts/fixup_automl_v1_keywords.py | 195 + owl-bot-staging/v1/setup.py | 90 + .../v1/testing/constraints-3.10.txt | 6 + .../v1/testing/constraints-3.11.txt | 6 + .../v1/testing/constraints-3.12.txt | 6 + .../v1/testing/constraints-3.7.txt | 9 + .../v1/testing/constraints-3.8.txt | 6 + .../v1/testing/constraints-3.9.txt | 6 + owl-bot-staging/v1/tests/__init__.py | 16 + owl-bot-staging/v1/tests/unit/__init__.py | 16 + .../v1/tests/unit/gapic/__init__.py | 16 + .../v1/tests/unit/gapic/automl_v1/__init__.py | 16 + .../unit/gapic/automl_v1/test_auto_ml.py | 10997 ++++++++++++ .../automl_v1/test_prediction_service.py | 2269 +++ owl-bot-staging/v1beta1/.coveragerc | 13 + owl-bot-staging/v1beta1/.flake8 | 33 + owl-bot-staging/v1beta1/MANIFEST.in | 2 + owl-bot-staging/v1beta1/README.rst | 49 + .../v1beta1/docs/_static/custom.css | 3 + .../v1beta1/docs/automl_v1beta1/auto_ml.rst | 10 + .../automl_v1beta1/prediction_service.rst | 6 + .../v1beta1/docs/automl_v1beta1/services.rst | 7 + .../v1beta1/docs/automl_v1beta1/types.rst | 6 + owl-bot-staging/v1beta1/docs/conf.py | 376 + owl-bot-staging/v1beta1/docs/index.rst | 7 + .../v1beta1/google/cloud/automl/__init__.py | 275 + .../google/cloud/automl/gapic_version.py | 16 + .../v1beta1/google/cloud/automl/py.typed | 2 + .../google/cloud/automl_v1beta1/__init__.py | 276 + .../cloud/automl_v1beta1/gapic_metadata.json | 437 + .../cloud/automl_v1beta1/gapic_version.py | 16 + .../google/cloud/automl_v1beta1/py.typed | 2 + .../cloud/automl_v1beta1/services/__init__.py | 15 + .../services/auto_ml/__init__.py | 22 + 
.../services/auto_ml/async_client.py | 3170 ++++ .../automl_v1beta1/services/auto_ml/client.py | 3335 ++++ .../automl_v1beta1/services/auto_ml/pagers.py | 628 + .../services/auto_ml/transports/__init__.py | 38 + .../services/auto_ml/transports/base.py | 570 + .../services/auto_ml/transports/grpc.py | 971 ++ .../auto_ml/transports/grpc_asyncio.py | 970 ++ .../services/auto_ml/transports/rest.py | 3091 ++++ .../services/prediction_service/__init__.py | 22 + .../prediction_service/async_client.py | 621 + .../services/prediction_service/client.py | 823 + .../prediction_service/transports/__init__.py | 38 + .../prediction_service/transports/base.py | 169 + .../prediction_service/transports/grpc.py | 348 + .../transports/grpc_asyncio.py | 347 + .../prediction_service/transports/rest.py | 484 + .../cloud/automl_v1beta1/types/__init__.py | 318 + .../types/annotation_payload.py | 158 + .../automl_v1beta1/types/annotation_spec.py | 62 + .../automl_v1beta1/types/classification.py | 379 + .../cloud/automl_v1beta1/types/column_spec.py | 120 + .../cloud/automl_v1beta1/types/data_items.py | 398 + .../cloud/automl_v1beta1/types/data_stats.py | 361 + .../cloud/automl_v1beta1/types/data_types.py | 180 + .../cloud/automl_v1beta1/types/dataset.py | 198 + .../cloud/automl_v1beta1/types/detection.py | 264 + .../cloud/automl_v1beta1/types/geometry.py | 75 + .../cloud/automl_v1beta1/types/image.py | 304 + .../google/cloud/automl_v1beta1/types/io.py | 1253 ++ .../cloud/automl_v1beta1/types/model.py | 208 + .../automl_v1beta1/types/model_evaluation.py | 196 + .../cloud/automl_v1beta1/types/operations.py | 392 + .../types/prediction_service.py | 285 + .../cloud/automl_v1beta1/types/ranges.py | 51 + .../cloud/automl_v1beta1/types/regression.py | 71 + .../cloud/automl_v1beta1/types/service.py | 874 + .../cloud/automl_v1beta1/types/table_spec.py | 111 + .../cloud/automl_v1beta1/types/tables.py | 426 + .../cloud/automl_v1beta1/types/temporal.py | 60 + .../google/cloud/automl_v1beta1/types/text.py | 119 + .../automl_v1beta1/types/text_extraction.py | 125 + .../automl_v1beta1/types/text_segment.py | 63 + .../automl_v1beta1/types/text_sentiment.py | 139 + .../cloud/automl_v1beta1/types/translation.py | 125 + .../cloud/automl_v1beta1/types/video.py | 56 + owl-bot-staging/v1beta1/mypy.ini | 3 + owl-bot-staging/v1beta1/noxfile.py | 184 + ..._generated_auto_ml_create_dataset_async.py | 57 + ...1_generated_auto_ml_create_dataset_sync.py | 57 + ...a1_generated_auto_ml_create_model_async.py | 56 + ...ta1_generated_auto_ml_create_model_sync.py | 56 + ..._generated_auto_ml_delete_dataset_async.py | 56 + ...1_generated_auto_ml_delete_dataset_sync.py | 56 + ...a1_generated_auto_ml_delete_model_async.py | 56 + ...ta1_generated_auto_ml_delete_model_sync.py | 56 + ...a1_generated_auto_ml_deploy_model_async.py | 56 + ...ta1_generated_auto_ml_deploy_model_sync.py | 56 + ...ta1_generated_auto_ml_export_data_async.py | 56 + ...eta1_generated_auto_ml_export_data_sync.py | 56 + ...auto_ml_export_evaluated_examples_async.py | 56 + ..._auto_ml_export_evaluated_examples_sync.py | 56 + ...a1_generated_auto_ml_export_model_async.py | 56 + ...ta1_generated_auto_ml_export_model_sync.py | 56 + ...rated_auto_ml_get_annotation_spec_async.py | 52 + ...erated_auto_ml_get_annotation_spec_sync.py | 52 + ...generated_auto_ml_get_column_spec_async.py | 52 + ..._generated_auto_ml_get_column_spec_sync.py | 52 + ...ta1_generated_auto_ml_get_dataset_async.py | 52 + ...eta1_generated_auto_ml_get_dataset_sync.py | 52 + ...beta1_generated_auto_ml_get_model_async.py 
| 52 + ...ated_auto_ml_get_model_evaluation_async.py | 52 + ...rated_auto_ml_get_model_evaluation_sync.py | 52 + ...1beta1_generated_auto_ml_get_model_sync.py | 52 + ..._generated_auto_ml_get_table_spec_async.py | 52 + ...1_generated_auto_ml_get_table_spec_sync.py | 52 + ...ta1_generated_auto_ml_import_data_async.py | 56 + ...eta1_generated_auto_ml_import_data_sync.py | 56 + ...nerated_auto_ml_list_column_specs_async.py | 53 + ...enerated_auto_ml_list_column_specs_sync.py | 53 + ...1_generated_auto_ml_list_datasets_async.py | 53 + ...a1_generated_auto_ml_list_datasets_sync.py | 53 + ...ed_auto_ml_list_model_evaluations_async.py | 53 + ...ted_auto_ml_list_model_evaluations_sync.py | 53 + ...ta1_generated_auto_ml_list_models_async.py | 53 + ...eta1_generated_auto_ml_list_models_sync.py | 53 + ...enerated_auto_ml_list_table_specs_async.py | 53 + ...generated_auto_ml_list_table_specs_sync.py | 53 + ..._generated_auto_ml_undeploy_model_async.py | 56 + ...1_generated_auto_ml_undeploy_model_sync.py | 56 + ...erated_auto_ml_update_column_spec_async.py | 51 + ...nerated_auto_ml_update_column_spec_sync.py | 51 + ..._generated_auto_ml_update_dataset_async.py | 56 + ...1_generated_auto_ml_update_dataset_sync.py | 56 + ...nerated_auto_ml_update_table_spec_async.py | 51 + ...enerated_auto_ml_update_table_spec_sync.py | 51 + ..._prediction_service_batch_predict_async.py | 56 + ...d_prediction_service_batch_predict_sync.py | 56 + ...erated_prediction_service_predict_async.py | 56 + ...nerated_prediction_service_predict_sync.py | 56 + ..._metadata_google.cloud.automl.v1beta1.json | 4289 +++++ .../scripts/fixup_automl_v1beta1_keywords.py | 201 + owl-bot-staging/v1beta1/setup.py | 90 + .../v1beta1/testing/constraints-3.10.txt | 6 + .../v1beta1/testing/constraints-3.11.txt | 6 + .../v1beta1/testing/constraints-3.12.txt | 6 + .../v1beta1/testing/constraints-3.7.txt | 9 + .../v1beta1/testing/constraints-3.8.txt | 6 + .../v1beta1/testing/constraints-3.9.txt | 6 + owl-bot-staging/v1beta1/tests/__init__.py | 16 + .../v1beta1/tests/unit/__init__.py | 16 + .../v1beta1/tests/unit/gapic/__init__.py | 16 + .../unit/gapic/automl_v1beta1/__init__.py | 16 + .../unit/gapic/automl_v1beta1/test_auto_ml.py | 14494 ++++++++++++++++ .../automl_v1beta1/test_prediction_service.py | 2270 +++ 247 files changed, 88272 insertions(+) create mode 100644 owl-bot-staging/v1/.coveragerc create mode 100644 owl-bot-staging/v1/.flake8 create mode 100644 owl-bot-staging/v1/MANIFEST.in create mode 100644 owl-bot-staging/v1/README.rst create mode 100644 owl-bot-staging/v1/docs/_static/custom.css create mode 100644 owl-bot-staging/v1/docs/automl_v1/auto_ml.rst create mode 100644 owl-bot-staging/v1/docs/automl_v1/prediction_service.rst create mode 100644 owl-bot-staging/v1/docs/automl_v1/services.rst create mode 100644 owl-bot-staging/v1/docs/automl_v1/types.rst create mode 100644 owl-bot-staging/v1/docs/conf.py create mode 100644 owl-bot-staging/v1/docs/index.rst create mode 100644 owl-bot-staging/v1/google/cloud/automl/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/automl/gapic_version.py create mode 100644 owl-bot-staging/v1/google/cloud/automl/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/gapic_metadata.json create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/gapic_version.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/py.typed create mode 100644 
owl-bot-staging/v1/google/cloud/automl_v1/services/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/client.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/rest.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/rest.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_payload.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_spec.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/classification.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/data_items.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/dataset.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/detection.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/geometry.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/image.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/io.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/model.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/model_evaluation.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/operations.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/prediction_service.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/service.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/text.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/text_extraction.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/text_segment.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/text_sentiment.py create mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/translation.py create mode 100644 owl-bot-staging/v1/mypy.ini create mode 100644 owl-bot-staging/v1/noxfile.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_async.py create 
mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json create mode 100644 owl-bot-staging/v1/scripts/fixup_automl_v1_keywords.py create mode 100644 owl-bot-staging/v1/setup.py create mode 100644 owl-bot-staging/v1/testing/constraints-3.10.txt create mode 100644 owl-bot-staging/v1/testing/constraints-3.11.txt create mode 100644 owl-bot-staging/v1/testing/constraints-3.12.txt create mode 100644 owl-bot-staging/v1/testing/constraints-3.7.txt create mode 100644 owl-bot-staging/v1/testing/constraints-3.8.txt create mode 100644 owl-bot-staging/v1/testing/constraints-3.9.txt create mode 100644 owl-bot-staging/v1/tests/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/automl_v1/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_auto_ml.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_prediction_service.py create mode 100644 owl-bot-staging/v1beta1/.coveragerc create mode 100644 owl-bot-staging/v1beta1/.flake8 create mode 100644 owl-bot-staging/v1beta1/MANIFEST.in create mode 100644 owl-bot-staging/v1beta1/README.rst create mode 100644 owl-bot-staging/v1beta1/docs/_static/custom.css create mode 100644 owl-bot-staging/v1beta1/docs/automl_v1beta1/auto_ml.rst create mode 100644 owl-bot-staging/v1beta1/docs/automl_v1beta1/prediction_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/automl_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/automl_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/docs/conf.py create mode 100644 owl-bot-staging/v1beta1/docs/index.rst create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl/gapic_version.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_version.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/__init__.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_payload.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_spec.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/column_spec.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_items.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_stats.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_types.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/dataset.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/detection.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/geometry.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/image.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/io.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model_evaluation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/operations.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/prediction_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/ranges.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/regression.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/table_spec.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/tables.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/temporal.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_extraction.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_segment.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_sentiment.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/translation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/video.py create mode 100644 owl-bot-staging/v1beta1/mypy.ini create mode 100644 owl-bot-staging/v1beta1/noxfile.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_automl_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/setup.py create mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.10.txt create mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.11.txt create mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.12.txt create mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.7.txt create mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.8.txt create mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.9.txt create mode 100644 owl-bot-staging/v1beta1/tests/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_auto_ml.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_prediction_service.py diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc new file mode 100644 index 00000000..8705cefd --- /dev/null +++ b/owl-bot-staging/v1/.coveragerc @@ -0,0 +1,13 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/automl/__init__.py + google/cloud/automl/gapic_version.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ diff --git a/owl-bot-staging/v1/.flake8 b/owl-bot-staging/v1/.flake8 new file mode 100644 index 00000000..29227d4c --- /dev/null +++ b/owl-bot-staging/v1/.flake8 @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! +[flake8] +ignore = E203, E266, E501, W503 +exclude = + # Exclude generated code. + **/proto/** + **/gapic/** + **/services/** + **/types/** + *_pb2.py + + # Standard linting exemptions. 
+ **/.nox/** + __pycache__, + .git, + *.pyc, + conf.py diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in new file mode 100644 index 00000000..f376b2aa --- /dev/null +++ b/owl-bot-staging/v1/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/cloud/automl *.py +recursive-include google/cloud/automl_v1 *.py diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst new file mode 100644 index 00000000..d0dde648 --- /dev/null +++ b/owl-bot-staging/v1/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Cloud Automl API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Cloud Automl API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + source <your-env>/bin/activate + <your-env>/bin/pip install /path/to/library + + +Windows +^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + <your-env>\Scripts\activate + <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/docs/_static/custom.css b/owl-bot-staging/v1/docs/_static/custom.css new file mode 100644 index 00000000..06423be0 --- /dev/null +++ b/owl-bot-staging/v1/docs/_static/custom.css @@ -0,0 +1,3 @@ +dl.field-list > dt { + min-width: 100px +} diff --git a/owl-bot-staging/v1/docs/automl_v1/auto_ml.rst b/owl-bot-staging/v1/docs/automl_v1/auto_ml.rst new file mode 100644 index 00000000..c8994a59 --- /dev/null +++ b/owl-bot-staging/v1/docs/automl_v1/auto_ml.rst @@ -0,0 +1,10 @@ +AutoMl +------------------------ + +.. automodule:: google.cloud.automl_v1.services.auto_ml + :members: + :inherited-members: + +.. automodule:: google.cloud.automl_v1.services.auto_ml.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/automl_v1/prediction_service.rst b/owl-bot-staging/v1/docs/automl_v1/prediction_service.rst new file mode 100644 index 00000000..d8f6da92 --- /dev/null +++ b/owl-bot-staging/v1/docs/automl_v1/prediction_service.rst @@ -0,0 +1,6 @@ +PredictionService +----------------------------------- + +.. automodule:: google.cloud.automl_v1.services.prediction_service + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/automl_v1/services.rst b/owl-bot-staging/v1/docs/automl_v1/services.rst new file mode 100644 index 00000000..ce8e2c3d --- /dev/null +++ b/owl-bot-staging/v1/docs/automl_v1/services.rst @@ -0,0 +1,7 @@ +Services for Google Cloud Automl v1 API +======================================= +..
toctree:: + :maxdepth: 2 + + auto_ml + prediction_service diff --git a/owl-bot-staging/v1/docs/automl_v1/types.rst b/owl-bot-staging/v1/docs/automl_v1/types.rst new file mode 100644 index 00000000..14a31a9e --- /dev/null +++ b/owl-bot-staging/v1/docs/automl_v1/types.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Automl v1 API +==================================== + +.. automodule:: google.cloud.automl_v1.types + :members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py new file mode 100644 index 00000000..708bcaa7 --- /dev/null +++ b/owl-bot-staging/v1/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# google-cloud-automl documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "4.0.1" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGLEOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The root toctree document. +root_doc = "index" + +# General information about the project. +project = u"google-cloud-automl" +copyright = u"2023, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. 
+# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. 
+# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-automl-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + root_doc, + "google-cloud-automl.tex", + u"google-cloud-automl Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. 
+# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + root_doc, + "google-cloud-automl", + u"Google Cloud Automl Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + root_doc, + "google-cloud-automl", + u"google-cloud-automl Documentation", + author, + "google-cloud-automl", + "GAPIC library for Google Cloud Automl API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst new file mode 100644 index 00000000..b5adf159 --- /dev/null +++ b/owl-bot-staging/v1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + automl_v1/services + automl_v1/types diff --git a/owl-bot-staging/v1/google/cloud/automl/__init__.py b/owl-bot-staging/v1/google/cloud/automl/__init__.py new file mode 100644 index 00000000..e8f62a9d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl/__init__.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.cloud.automl import gapic_version as package_version + +__version__ = package_version.__version__ + + +from google.cloud.automl_v1.services.auto_ml.client import AutoMlClient +from google.cloud.automl_v1.services.auto_ml.async_client import AutoMlAsyncClient +from google.cloud.automl_v1.services.prediction_service.client import PredictionServiceClient +from google.cloud.automl_v1.services.prediction_service.async_client import PredictionServiceAsyncClient + +from google.cloud.automl_v1.types.annotation_payload import AnnotationPayload +from google.cloud.automl_v1.types.annotation_spec import AnnotationSpec +from google.cloud.automl_v1.types.classification import ClassificationAnnotation +from google.cloud.automl_v1.types.classification import ClassificationEvaluationMetrics +from google.cloud.automl_v1.types.classification import ClassificationType +from google.cloud.automl_v1.types.data_items import Document +from google.cloud.automl_v1.types.data_items import DocumentDimensions +from google.cloud.automl_v1.types.data_items import ExamplePayload +from google.cloud.automl_v1.types.data_items import Image +from google.cloud.automl_v1.types.data_items import TextSnippet +from google.cloud.automl_v1.types.dataset import Dataset +from google.cloud.automl_v1.types.detection import BoundingBoxMetricsEntry +from google.cloud.automl_v1.types.detection import ImageObjectDetectionAnnotation +from google.cloud.automl_v1.types.detection import ImageObjectDetectionEvaluationMetrics +from google.cloud.automl_v1.types.geometry import BoundingPoly +from google.cloud.automl_v1.types.geometry import NormalizedVertex +from google.cloud.automl_v1.types.image import ImageClassificationDatasetMetadata +from google.cloud.automl_v1.types.image import ImageClassificationModelDeploymentMetadata +from google.cloud.automl_v1.types.image import ImageClassificationModelMetadata +from google.cloud.automl_v1.types.image import ImageObjectDetectionDatasetMetadata +from google.cloud.automl_v1.types.image import ImageObjectDetectionModelDeploymentMetadata +from google.cloud.automl_v1.types.image import ImageObjectDetectionModelMetadata +from google.cloud.automl_v1.types.io import BatchPredictInputConfig +from google.cloud.automl_v1.types.io import BatchPredictOutputConfig +from google.cloud.automl_v1.types.io import DocumentInputConfig +from google.cloud.automl_v1.types.io import GcsDestination +from google.cloud.automl_v1.types.io import GcsSource +from google.cloud.automl_v1.types.io import InputConfig +from google.cloud.automl_v1.types.io import ModelExportOutputConfig +from google.cloud.automl_v1.types.io import OutputConfig +from google.cloud.automl_v1.types.model import Model +from google.cloud.automl_v1.types.model_evaluation import ModelEvaluation +from google.cloud.automl_v1.types.operations import BatchPredictOperationMetadata +from google.cloud.automl_v1.types.operations import CreateDatasetOperationMetadata +from google.cloud.automl_v1.types.operations import CreateModelOperationMetadata +from google.cloud.automl_v1.types.operations import DeleteOperationMetadata +from 
google.cloud.automl_v1.types.operations import DeployModelOperationMetadata +from google.cloud.automl_v1.types.operations import ExportDataOperationMetadata +from google.cloud.automl_v1.types.operations import ExportModelOperationMetadata +from google.cloud.automl_v1.types.operations import ImportDataOperationMetadata +from google.cloud.automl_v1.types.operations import OperationMetadata +from google.cloud.automl_v1.types.operations import UndeployModelOperationMetadata +from google.cloud.automl_v1.types.prediction_service import BatchPredictRequest +from google.cloud.automl_v1.types.prediction_service import BatchPredictResult +from google.cloud.automl_v1.types.prediction_service import PredictRequest +from google.cloud.automl_v1.types.prediction_service import PredictResponse +from google.cloud.automl_v1.types.service import CreateDatasetRequest +from google.cloud.automl_v1.types.service import CreateModelRequest +from google.cloud.automl_v1.types.service import DeleteDatasetRequest +from google.cloud.automl_v1.types.service import DeleteModelRequest +from google.cloud.automl_v1.types.service import DeployModelRequest +from google.cloud.automl_v1.types.service import ExportDataRequest +from google.cloud.automl_v1.types.service import ExportModelRequest +from google.cloud.automl_v1.types.service import GetAnnotationSpecRequest +from google.cloud.automl_v1.types.service import GetDatasetRequest +from google.cloud.automl_v1.types.service import GetModelEvaluationRequest +from google.cloud.automl_v1.types.service import GetModelRequest +from google.cloud.automl_v1.types.service import ImportDataRequest +from google.cloud.automl_v1.types.service import ListDatasetsRequest +from google.cloud.automl_v1.types.service import ListDatasetsResponse +from google.cloud.automl_v1.types.service import ListModelEvaluationsRequest +from google.cloud.automl_v1.types.service import ListModelEvaluationsResponse +from google.cloud.automl_v1.types.service import ListModelsRequest +from google.cloud.automl_v1.types.service import ListModelsResponse +from google.cloud.automl_v1.types.service import UndeployModelRequest +from google.cloud.automl_v1.types.service import UpdateDatasetRequest +from google.cloud.automl_v1.types.service import UpdateModelRequest +from google.cloud.automl_v1.types.text import TextClassificationDatasetMetadata +from google.cloud.automl_v1.types.text import TextClassificationModelMetadata +from google.cloud.automl_v1.types.text import TextExtractionDatasetMetadata +from google.cloud.automl_v1.types.text import TextExtractionModelMetadata +from google.cloud.automl_v1.types.text import TextSentimentDatasetMetadata +from google.cloud.automl_v1.types.text import TextSentimentModelMetadata +from google.cloud.automl_v1.types.text_extraction import TextExtractionAnnotation +from google.cloud.automl_v1.types.text_extraction import TextExtractionEvaluationMetrics +from google.cloud.automl_v1.types.text_segment import TextSegment +from google.cloud.automl_v1.types.text_sentiment import TextSentimentAnnotation +from google.cloud.automl_v1.types.text_sentiment import TextSentimentEvaluationMetrics +from google.cloud.automl_v1.types.translation import TranslationAnnotation +from google.cloud.automl_v1.types.translation import TranslationDatasetMetadata +from google.cloud.automl_v1.types.translation import TranslationEvaluationMetrics +from google.cloud.automl_v1.types.translation import TranslationModelMetadata + +__all__ = ('AutoMlClient', + 'AutoMlAsyncClient', + 'PredictionServiceClient', + 
'PredictionServiceAsyncClient', + 'AnnotationPayload', + 'AnnotationSpec', + 'ClassificationAnnotation', + 'ClassificationEvaluationMetrics', + 'ClassificationType', + 'Document', + 'DocumentDimensions', + 'ExamplePayload', + 'Image', + 'TextSnippet', + 'Dataset', + 'BoundingBoxMetricsEntry', + 'ImageObjectDetectionAnnotation', + 'ImageObjectDetectionEvaluationMetrics', + 'BoundingPoly', + 'NormalizedVertex', + 'ImageClassificationDatasetMetadata', + 'ImageClassificationModelDeploymentMetadata', + 'ImageClassificationModelMetadata', + 'ImageObjectDetectionDatasetMetadata', + 'ImageObjectDetectionModelDeploymentMetadata', + 'ImageObjectDetectionModelMetadata', + 'BatchPredictInputConfig', + 'BatchPredictOutputConfig', + 'DocumentInputConfig', + 'GcsDestination', + 'GcsSource', + 'InputConfig', + 'ModelExportOutputConfig', + 'OutputConfig', + 'Model', + 'ModelEvaluation', + 'BatchPredictOperationMetadata', + 'CreateDatasetOperationMetadata', + 'CreateModelOperationMetadata', + 'DeleteOperationMetadata', + 'DeployModelOperationMetadata', + 'ExportDataOperationMetadata', + 'ExportModelOperationMetadata', + 'ImportDataOperationMetadata', + 'OperationMetadata', + 'UndeployModelOperationMetadata', + 'BatchPredictRequest', + 'BatchPredictResult', + 'PredictRequest', + 'PredictResponse', + 'CreateDatasetRequest', + 'CreateModelRequest', + 'DeleteDatasetRequest', + 'DeleteModelRequest', + 'DeployModelRequest', + 'ExportDataRequest', + 'ExportModelRequest', + 'GetAnnotationSpecRequest', + 'GetDatasetRequest', + 'GetModelEvaluationRequest', + 'GetModelRequest', + 'ImportDataRequest', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'UndeployModelRequest', + 'UpdateDatasetRequest', + 'UpdateModelRequest', + 'TextClassificationDatasetMetadata', + 'TextClassificationModelMetadata', + 'TextExtractionDatasetMetadata', + 'TextExtractionModelMetadata', + 'TextSentimentDatasetMetadata', + 'TextSentimentModelMetadata', + 'TextExtractionAnnotation', + 'TextExtractionEvaluationMetrics', + 'TextSegment', + 'TextSentimentAnnotation', + 'TextSentimentEvaluationMetrics', + 'TranslationAnnotation', + 'TranslationDatasetMetadata', + 'TranslationEvaluationMetrics', + 'TranslationModelMetadata', +) diff --git a/owl-bot-staging/v1/google/cloud/automl/gapic_version.py b/owl-bot-staging/v1/google/cloud/automl/gapic_version.py new file mode 100644 index 00000000..360a0d13 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/v1/google/cloud/automl/py.typed b/owl-bot-staging/v1/google/cloud/automl/py.typed new file mode 100644 index 00000000..0560ba18 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. 
+# The google-cloud-automl package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/__init__.py new file mode 100644 index 00000000..eea87cfa --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/__init__.py @@ -0,0 +1,196 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.cloud.automl_v1 import gapic_version as package_version + +__version__ = package_version.__version__ + + +from .services.auto_ml import AutoMlClient +from .services.auto_ml import AutoMlAsyncClient +from .services.prediction_service import PredictionServiceClient +from .services.prediction_service import PredictionServiceAsyncClient + +from .types.annotation_payload import AnnotationPayload +from .types.annotation_spec import AnnotationSpec +from .types.classification import ClassificationAnnotation +from .types.classification import ClassificationEvaluationMetrics +from .types.classification import ClassificationType +from .types.data_items import Document +from .types.data_items import DocumentDimensions +from .types.data_items import ExamplePayload +from .types.data_items import Image +from .types.data_items import TextSnippet +from .types.dataset import Dataset +from .types.detection import BoundingBoxMetricsEntry +from .types.detection import ImageObjectDetectionAnnotation +from .types.detection import ImageObjectDetectionEvaluationMetrics +from .types.geometry import BoundingPoly +from .types.geometry import NormalizedVertex +from .types.image import ImageClassificationDatasetMetadata +from .types.image import ImageClassificationModelDeploymentMetadata +from .types.image import ImageClassificationModelMetadata +from .types.image import ImageObjectDetectionDatasetMetadata +from .types.image import ImageObjectDetectionModelDeploymentMetadata +from .types.image import ImageObjectDetectionModelMetadata +from .types.io import BatchPredictInputConfig +from .types.io import BatchPredictOutputConfig +from .types.io import DocumentInputConfig +from .types.io import GcsDestination +from .types.io import GcsSource +from .types.io import InputConfig +from .types.io import ModelExportOutputConfig +from .types.io import OutputConfig +from .types.model import Model +from .types.model_evaluation import ModelEvaluation +from .types.operations import BatchPredictOperationMetadata +from .types.operations import CreateDatasetOperationMetadata +from .types.operations import CreateModelOperationMetadata +from .types.operations import DeleteOperationMetadata +from .types.operations import DeployModelOperationMetadata +from .types.operations import ExportDataOperationMetadata +from .types.operations import ExportModelOperationMetadata +from .types.operations import ImportDataOperationMetadata +from .types.operations import OperationMetadata +from .types.operations import UndeployModelOperationMetadata +from .types.prediction_service import BatchPredictRequest +from 
.types.prediction_service import BatchPredictResult +from .types.prediction_service import PredictRequest +from .types.prediction_service import PredictResponse +from .types.service import CreateDatasetRequest +from .types.service import CreateModelRequest +from .types.service import DeleteDatasetRequest +from .types.service import DeleteModelRequest +from .types.service import DeployModelRequest +from .types.service import ExportDataRequest +from .types.service import ExportModelRequest +from .types.service import GetAnnotationSpecRequest +from .types.service import GetDatasetRequest +from .types.service import GetModelEvaluationRequest +from .types.service import GetModelRequest +from .types.service import ImportDataRequest +from .types.service import ListDatasetsRequest +from .types.service import ListDatasetsResponse +from .types.service import ListModelEvaluationsRequest +from .types.service import ListModelEvaluationsResponse +from .types.service import ListModelsRequest +from .types.service import ListModelsResponse +from .types.service import UndeployModelRequest +from .types.service import UpdateDatasetRequest +from .types.service import UpdateModelRequest +from .types.text import TextClassificationDatasetMetadata +from .types.text import TextClassificationModelMetadata +from .types.text import TextExtractionDatasetMetadata +from .types.text import TextExtractionModelMetadata +from .types.text import TextSentimentDatasetMetadata +from .types.text import TextSentimentModelMetadata +from .types.text_extraction import TextExtractionAnnotation +from .types.text_extraction import TextExtractionEvaluationMetrics +from .types.text_segment import TextSegment +from .types.text_sentiment import TextSentimentAnnotation +from .types.text_sentiment import TextSentimentEvaluationMetrics +from .types.translation import TranslationAnnotation +from .types.translation import TranslationDatasetMetadata +from .types.translation import TranslationEvaluationMetrics +from .types.translation import TranslationModelMetadata + +__all__ = ( + 'AutoMlAsyncClient', + 'PredictionServiceAsyncClient', +'AnnotationPayload', +'AnnotationSpec', +'AutoMlClient', +'BatchPredictInputConfig', +'BatchPredictOperationMetadata', +'BatchPredictOutputConfig', +'BatchPredictRequest', +'BatchPredictResult', +'BoundingBoxMetricsEntry', +'BoundingPoly', +'ClassificationAnnotation', +'ClassificationEvaluationMetrics', +'ClassificationType', +'CreateDatasetOperationMetadata', +'CreateDatasetRequest', +'CreateModelOperationMetadata', +'CreateModelRequest', +'Dataset', +'DeleteDatasetRequest', +'DeleteModelRequest', +'DeleteOperationMetadata', +'DeployModelOperationMetadata', +'DeployModelRequest', +'Document', +'DocumentDimensions', +'DocumentInputConfig', +'ExamplePayload', +'ExportDataOperationMetadata', +'ExportDataRequest', +'ExportModelOperationMetadata', +'ExportModelRequest', +'GcsDestination', +'GcsSource', +'GetAnnotationSpecRequest', +'GetDatasetRequest', +'GetModelEvaluationRequest', +'GetModelRequest', +'Image', +'ImageClassificationDatasetMetadata', +'ImageClassificationModelDeploymentMetadata', +'ImageClassificationModelMetadata', +'ImageObjectDetectionAnnotation', +'ImageObjectDetectionDatasetMetadata', +'ImageObjectDetectionEvaluationMetrics', +'ImageObjectDetectionModelDeploymentMetadata', +'ImageObjectDetectionModelMetadata', +'ImportDataOperationMetadata', +'ImportDataRequest', +'InputConfig', +'ListDatasetsRequest', +'ListDatasetsResponse', +'ListModelEvaluationsRequest', +'ListModelEvaluationsResponse', 
+'ListModelsRequest', +'ListModelsResponse', +'Model', +'ModelEvaluation', +'ModelExportOutputConfig', +'NormalizedVertex', +'OperationMetadata', +'OutputConfig', +'PredictRequest', +'PredictResponse', +'PredictionServiceClient', +'TextClassificationDatasetMetadata', +'TextClassificationModelMetadata', +'TextExtractionAnnotation', +'TextExtractionDatasetMetadata', +'TextExtractionEvaluationMetrics', +'TextExtractionModelMetadata', +'TextSegment', +'TextSentimentAnnotation', +'TextSentimentDatasetMetadata', +'TextSentimentEvaluationMetrics', +'TextSentimentModelMetadata', +'TextSnippet', +'TranslationAnnotation', +'TranslationDatasetMetadata', +'TranslationEvaluationMetrics', +'TranslationModelMetadata', +'UndeployModelOperationMetadata', +'UndeployModelRequest', +'UpdateDatasetRequest', +'UpdateModelRequest', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/automl_v1/gapic_metadata.json new file mode 100644 index 00000000..7d017052 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/gapic_metadata.json @@ -0,0 +1,347 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.automl_v1", + "protoPackage": "google.cloud.automl.v1", + "schema": "1.0", + "services": { + "AutoMl": { + "clients": { + "grpc": { + "libraryClient": "AutoMlClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "CreateModel": { + "methods": [ + "create_model" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + } + } + }, + "grpc-async": { + "libraryClient": "AutoMlAsyncClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "CreateModel": { + "methods": [ + "create_model" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + 
"ListModels": { + "methods": [ + "list_models" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + } + } + }, + "rest": { + "libraryClient": "AutoMlClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "CreateModel": { + "methods": [ + "create_model" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + } + } + } + } + }, + "PredictionService": { + "clients": { + "grpc": { + "libraryClient": "PredictionServiceClient", + "rpcs": { + "BatchPredict": { + "methods": [ + "batch_predict" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PredictionServiceAsyncClient", + "rpcs": { + "BatchPredict": { + "methods": [ + "batch_predict" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + } + } + }, + "rest": { + "libraryClient": "PredictionServiceClient", + "rpcs": { + "BatchPredict": { + "methods": [ + "batch_predict" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/gapic_version.py b/owl-bot-staging/v1/google/cloud/automl_v1/gapic_version.py new file mode 100644 index 00000000..360a0d13 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/py.typed b/owl-bot-staging/v1/google/cloud/automl_v1/py.typed new file mode 100644 index 00000000..0560ba18 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-automl package uses inline types. 
diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/__init__.py new file mode 100644 index 00000000..89a37dc9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/__init__.py new file mode 100644 index 00000000..8f53357e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import AutoMlClient +from .async_client import AutoMlAsyncClient + +__all__ = ( + 'AutoMlClient', + 'AutoMlAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/async_client.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/async_client.py new file mode 100644 index 00000000..5de13fa6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/async_client.py @@ -0,0 +1,2507 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union + +from google.cloud.automl_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.automl_v1.services.auto_ml import pagers +from google.cloud.automl_v1.types import annotation_spec +from google.cloud.automl_v1.types import classification +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import dataset as gca_dataset +from google.cloud.automl_v1.types import detection +from google.cloud.automl_v1.types import image +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model as gca_model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import operations +from google.cloud.automl_v1.types import service +from google.cloud.automl_v1.types import text +from google.cloud.automl_v1.types import text_extraction +from google.cloud.automl_v1.types import text_sentiment +from google.cloud.automl_v1.types import translation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport +from .client import AutoMlClient + + +class AutoMlAsyncClient: + """AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. + For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or dash-case, either of those cases is accepted. 
+ """ + + _client: AutoMlClient + + DEFAULT_ENDPOINT = AutoMlClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = AutoMlClient.DEFAULT_MTLS_ENDPOINT + + annotation_spec_path = staticmethod(AutoMlClient.annotation_spec_path) + parse_annotation_spec_path = staticmethod(AutoMlClient.parse_annotation_spec_path) + dataset_path = staticmethod(AutoMlClient.dataset_path) + parse_dataset_path = staticmethod(AutoMlClient.parse_dataset_path) + model_path = staticmethod(AutoMlClient.model_path) + parse_model_path = staticmethod(AutoMlClient.parse_model_path) + model_evaluation_path = staticmethod(AutoMlClient.model_evaluation_path) + parse_model_evaluation_path = staticmethod(AutoMlClient.parse_model_evaluation_path) + common_billing_account_path = staticmethod(AutoMlClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(AutoMlClient.parse_common_billing_account_path) + common_folder_path = staticmethod(AutoMlClient.common_folder_path) + parse_common_folder_path = staticmethod(AutoMlClient.parse_common_folder_path) + common_organization_path = staticmethod(AutoMlClient.common_organization_path) + parse_common_organization_path = staticmethod(AutoMlClient.parse_common_organization_path) + common_project_path = staticmethod(AutoMlClient.common_project_path) + parse_common_project_path = staticmethod(AutoMlClient.parse_common_project_path) + common_location_path = staticmethod(AutoMlClient.common_location_path) + parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlAsyncClient: The constructed client. + """ + return AutoMlClient.from_service_account_info.__func__(AutoMlAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlAsyncClient: The constructed client. + """ + return AutoMlClient.from_service_account_file.__func__(AutoMlAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return AutoMlClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
+
+    @property
+    def transport(self) -> AutoMlTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            AutoMlTransport: The transport used by the client instance.
+        """
+        return self._client.transport
+
+    get_transport_class = functools.partial(type(AutoMlClient).get_transport_class, type(AutoMlClient))
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, AutoMlTransport] = "grpc_asyncio",
+            client_options: Optional[ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the auto ml client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.AutoMlTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+ """ + self._client = AutoMlClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_dataset(self, + request: Optional[Union[service.CreateDatasetRequest, dict]] = None, + *, + parent: Optional[str] = None, + dataset: Optional[gca_dataset.Dataset] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_create_dataset(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + dataset = automl_v1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.CreateDatasetRequest, dict]]): + The request object. Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. + parent (:class:`str`): + Required. The resource name of the + project to create the dataset for. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (:class:`google.cloud.automl_v1.types.Dataset`): + Required. The dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.automl_v1.types.Dataset` A workspace for solving a single, particular machine learning (ML) problem. + A workspace contains examples that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.CreateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_dataset.Dataset, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_dataset(self, + request: Optional[Union[service.GetDatasetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_get_dataset(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = await client.get_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.GetDatasetRequest, dict]]): + The request object. Request message for + [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. + name (:class:`str`): + Required. The resource name of the + dataset to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.GetDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_dataset, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_datasets(self, + request: Optional[Union[service.ListDatasetsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsAsyncPager: + r"""Lists datasets in a project. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_list_datasets(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.ListDatasetsRequest, dict]]): + The request object. Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + parent (:class:`str`): + Required. The resource name of the + project from which to list datasets. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsAsyncPager: + Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ListDatasetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_datasets, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDatasetsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_dataset(self, + request: Optional[Union[service.UpdateDatasetRequest, dict]] = None, + *, + dataset: Optional[gca_dataset.Dataset] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_update_dataset(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + dataset = automl_v1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = await client.update_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.UpdateDatasetRequest, dict]]): + The request object. Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] + dataset (:class:`google.cloud.automl_v1.types.Dataset`): + Required. The dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to + the resource. 
+ + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.UpdateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_dataset(self, + request: Optional[Union[service.DeleteDatasetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_delete_dataset(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.DeleteDatasetRequest, dict]]): + The request object. Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. 
+ name (:class:`str`): + Required. The resource name of the + dataset to delete. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.DeleteDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_dataset, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def import_data(self, + request: Optional[Union[service.ImportDataRequest, dict]] = None, + *, + name: Optional[str] = None, + input_config: Optional[io.InputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_import_data(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + input_config = automl_v1.InputConfig() + input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] + + request = automl_v1.ImportDataRequest( + name="name_value", + input_config=input_config, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.ImportDataRequest, dict]]): + The request object. Request message for + [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. + name (:class:`str`): + Required. Dataset name. Dataset must + already exist. All imported annotations + and examples will be added. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (:class:`google.cloud.automl_v1.types.InputConfig`): + Required. The desired input location + and its domain specific semantics, if + any. + + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ImportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_data, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def export_data(self, + request: Optional[Union[service.ExportDataRequest, dict]] = None, + *, + name: Optional[str] = None, + output_config: Optional[io.OutputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_export_data(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + output_config = automl_v1.OutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.ExportDataRequest( + name="name_value", + output_config=output_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.ExportDataRequest, dict]]): + The request object. Request message for + [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. + name (:class:`str`): + Required. The resource name of the + dataset. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`google.cloud.automl_v1.types.OutputConfig`): + Required. The desired output + location. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
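+        # any() treats None (and empty values) as "not provided", so only
+        # flattened fields that were actually passed trigger the mutual
+        # exclusion check against an explicit `request` object below.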
+ has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ExportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_data, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_annotation_spec(self, + request: Optional[Union[service.GetAnnotationSpecRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an annotation spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_get_annotation_spec(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_annotation_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.GetAnnotationSpecRequest, dict]]): + The request object. Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. + name (:class:`str`): + Required. The resource name of the + annotation spec to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.AnnotationSpec: + A definition of an annotation spec. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.GetAnnotationSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_annotation_spec, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_model(self, + request: Optional[Union[service.CreateModelRequest, dict]] = None, + *, + parent: Optional[str] = None, + model: Optional[gca_model.Model] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_create_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.CreateModelRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.CreateModelRequest, dict]]): + The request object. Request message for + [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. + parent (:class:`str`): + Required. Resource name of the parent + project where the model is being + created. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (:class:`google.cloud.automl_v1.types.Model`): + Required. The model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.automl_v1.types.Model` API proto + representing a trained machine learning model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.CreateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_model.Model, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model(self, + request: Optional[Union[service.GetModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_get_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.GetModelRequest, dict]]): + The request object. Request message for + [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. + name (:class:`str`): + Required. Resource name of the model. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.Model: + API proto representing a trained + machine learning model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_models(self, + request: Optional[Union[service.ListModelsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsAsyncPager: + r"""Lists models. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_list_models(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.ListModelsRequest, dict]]): + The request object. Request message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + parent (:class:`str`): + Required. Resource name of the + project, from which to list the models. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.automl_v1.services.auto_ml.pagers.ListModelsAsyncPager: + Response message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_models, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_model(self, + request: Optional[Union[service.DeleteModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_delete_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.DeleteModelRequest, dict]]): + The request object. Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. 
+ name (:class:`str`): + Required. Resource name of the model + being deleted. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.DeleteModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def update_model(self, + request: Optional[Union[service.UpdateModelRequest, dict]] = None, + *, + model: Optional[gca_model.Model] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_update_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.UpdateModelRequest( + ) + + # Make the request + response = await client.update_model(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.UpdateModelRequest, dict]]): + The request object. Request message for + [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] + model (:class:`google.cloud.automl_v1.types.Model`): + Required. The model which replaces + the resource on the server. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.Model: + API proto representing a trained + machine learning model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.UpdateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model.name", request.model.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def deploy_model(self, + request: Optional[Union[service.DeployModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deploys a model. If a model is already deployed, deploying it + with the same parameters has no effect. Deploying with different + parametrs (as e.g. changing + [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number]) + will reset the deployment state without pausing the model's + availability. 
+ + Only applicable for Text Classification, Image Object Detection + , Tables, and Image Segmentation; all other domains manage + deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_deploy_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.DeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.DeployModelRequest, dict]]): + The request object. Request message for + [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. + name (:class:`str`): + Required. Resource name of the model + to deploy. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.DeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.deploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
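+        # The transport's operations client is handed to the future so it can
+        # poll the deploy operation until the service reports completion.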
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def undeploy_model(self, + request: Optional[Union[service.UndeployModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys a model. If the model is not deployed this method has + no effect. + + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_undeploy_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.UndeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.UndeployModelRequest, dict]]): + The request object. Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. + name (:class:`str`): + Required. Resource name of the model + to undeploy. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.UndeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def export_model(self, + request: Optional[Union[service.ExportModelRequest, dict]] = None, + *, + name: Optional[str] = None, + output_config: Optional[io.ModelExportOutputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports a trained, "export-able", model to a user specified + Google Cloud Storage location. A model is considered export-able + if and only if it has an export format defined for it in + [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_export_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + output_config = automl_v1.ModelExportOutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.ExportModelRequest( + name="name_value", + output_config=output_config, + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.ExportModelRequest, dict]]): + The request object. Request message for + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an + error code will be returned. + name (:class:`str`): + Required. The resource name of the + model to export. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`google.cloud.automl_v1.types.ModelExportOutputConfig`): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ExportModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation(self, + request: Optional[Union[service.GetModelEvaluationRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a model evaluation. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_get_model_evaluation(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.GetModelEvaluationRequest, dict]]): + The request object. Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. + name (:class:`str`): + Required. Resource name for the model + evaluation. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.ModelEvaluation: + Evaluation results of a model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.GetModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_evaluations(self, + request: Optional[Union[service.ListModelEvaluationsRequest, dict]] = None, + *, + parent: Optional[str] = None, + filter: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsAsyncPager: + r"""Lists model evaluations. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_list_model_evaluations(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.ListModelEvaluationsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.ListModelEvaluationsRequest, dict]]): + The request object. Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. + parent (:class:`str`): + Required. Resource name of the model + to list the model evaluations for. If + modelId is set as "-", this will list + model evaluations from across all models + of the parent location. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Required. An expression for filtering the results of the + request. + + - ``annotation_spec_id`` - for =, != or existence. See + example below for the last. + + Some examples of using the filter are: + + - ``annotation_spec_id!=4`` --> The model evaluation + was done for annotation spec with ID different than + 4. + - ``NOT annotation_spec_id:*`` --> The model evaluation + was done for aggregate of all annotation specs. + + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager: + Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, filter]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ListModelEvaluationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluations, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
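+        # to_grpc_metadata packs ("parent", ...) into the x-goog-request-params
+        # header, which the API frontend uses to route the request.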
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "AutoMlAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "AutoMlAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/client.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/client.py new file mode 100644 index 00000000..e91d6dee --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/client.py @@ -0,0 +1,2675 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast + +from google.cloud.automl_v1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.automl_v1.services.auto_ml import pagers +from google.cloud.automl_v1.types import annotation_spec +from google.cloud.automl_v1.types import classification +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import dataset as gca_dataset +from google.cloud.automl_v1.types import detection +from google.cloud.automl_v1.types import image +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model as gca_model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import operations +from google.cloud.automl_v1.types import service +from 
google.cloud.automl_v1.types import text +from google.cloud.automl_v1.types import text_extraction +from google.cloud.automl_v1.types import text_sentiment +from google.cloud.automl_v1.types import translation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import AutoMlGrpcTransport +from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport +from .transports.rest import AutoMlRestTransport + + +class AutoMlClientMeta(type): + """Metaclass for the AutoMl client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[AutoMlTransport]] + _transport_registry["grpc"] = AutoMlGrpcTransport + _transport_registry["grpc_asyncio"] = AutoMlGrpcAsyncIOTransport + _transport_registry["rest"] = AutoMlRestTransport + + def get_transport_class(cls, + label: Optional[str] = None, + ) -> Type[AutoMlTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class AutoMlClient(metaclass=AutoMlClientMeta): + """AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. + For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or dash-case, either of those cases is accepted. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "automl.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. 
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            AutoMlClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            AutoMlClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> AutoMlTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            AutoMlTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str:
+        """Returns a fully-qualified annotation_spec string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, )
+
+    @staticmethod
+    def parse_annotation_spec_path(path: str) -> Dict[str,str]:
+        """Parses a annotation_spec path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/annotationSpecs/(?P<annotation_spec>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def dataset_path(project: str,location: str,dataset: str,) -> str:
+        """Returns a fully-qualified dataset string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+
+    @staticmethod
+    def parse_dataset_path(path: str) -> Dict[str,str]:
+        """Parses a dataset path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def model_path(project: str,location: str,model: str,) -> str:
+        """Returns a fully-qualified model string."""
+        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str,str]:
+        """Parses a model path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def model_evaluation_path(project: str,location: str,model: str,model_evaluation: str,) -> str:
+        """Returns a fully-qualified model_evaluation string."""
+        return "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(project=project, location=location, model=model, model_evaluation=model_evaluation, )
+
+    @staticmethod
+    def parse_model_evaluation_path(path: str) -> Dict[str,str]:
+        """Parses a model_evaluation path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/modelEvaluations/(?P<model_evaluation>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, AutoMlTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the auto ml client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, AutoMlTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
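        The following is an editorial sketch, not generated code: it shows common
        construction patterns described above. The endpoint, transport name, and
        key-file path are placeholder values, and valid credentials are assumed to
        be available in the environment.

        .. code-block:: python

            from google.cloud import automl_v1

            # Default construction: endpoint and credentials are resolved
            # from the environment (Application Default Credentials).
            client = automl_v1.AutoMlClient()

            # Explicit endpoint override (placeholder value) and the REST
            # transport instead of the default gRPC transport.
            client = automl_v1.AutoMlClient(
                client_options={"api_endpoint": "automl.googleapis.com"},
                transport="rest",
            )

            # Construction from a service account key file (placeholder path).
            client = automl_v1.AutoMlClient.from_service_account_file(
                "service-account.json")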
+ """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AutoMlTransport): + # transport is a AutoMlTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def create_dataset(self, + request: Optional[Union[service.CreateDatasetRequest, dict]] = None, + *, + parent: Optional[str] = None, + dataset: Optional[gca_dataset.Dataset] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_create_dataset(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + dataset = automl_v1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.CreateDatasetRequest, dict]): + The request object. Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. + parent (str): + Required. 
The resource name of the + project to create the dataset for. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (google.cloud.automl_v1.types.Dataset): + Required. The dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.automl_v1.types.Dataset` A workspace for solving a single, particular machine learning (ML) problem. + A workspace contains examples that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.CreateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.CreateDatasetRequest): + request = service.CreateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gca_dataset.Dataset, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_dataset(self, + request: Optional[Union[service.GetDatasetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_get_dataset(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = client.get_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.GetDatasetRequest, dict]): + The request object. Request message for + [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. + name (str): + Required. The resource name of the + dataset to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetDatasetRequest): + request = service.GetDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_datasets(self, + request: Optional[Union[service.ListDatasetsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsPager: + r"""Lists datasets in a project. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_list_datasets(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.ListDatasetsRequest, dict]): + The request object. Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + parent (str): + Required. The resource name of the + project from which to list datasets. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsPager: + Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListDatasetsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListDatasetsRequest): + request = service.ListDatasetsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_datasets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDatasetsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_dataset(self, + request: Optional[Union[service.UpdateDatasetRequest, dict]] = None, + *, + dataset: Optional[gca_dataset.Dataset] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a dataset. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_update_dataset(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + dataset = automl_v1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = client.update_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.UpdateDatasetRequest, dict]): + The request object. Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] + dataset (google.cloud.automl_v1.types.Dataset): + Required. The dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.UpdateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.UpdateDatasetRequest): + request = service.UpdateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. 
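        # Editorial sketch (not generated code): the fully formed request is sent
        # below; an equivalent call using the flattened arguments would be, with
        # illustrative values and ``field_mask_pb2`` already imported at the top
        # of this module:
        #
        #     client.update_dataset(
        #         dataset=dataset,
        #         update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
        #     )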
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_dataset(self, + request: Optional[Union[service.DeleteDatasetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_delete_dataset(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.DeleteDatasetRequest, dict]): + The request object. Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. + name (str): + Required. The resource name of the + dataset to delete. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.DeleteDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.DeleteDatasetRequest): + request = service.DeleteDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
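        # Editorial note (not generated code): with the flattened argument this
        # reduces to ``client.delete_dataset(name=...)``; the returned operation
        # resolves to ``google.protobuf.empty_pb2.Empty`` once deletion completes.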
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def import_data(self, + request: Optional[Union[service.ImportDataRequest, dict]] = None, + *, + name: Optional[str] = None, + input_config: Optional[io.InputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_import_data(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + input_config = automl_v1.InputConfig() + input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] + + request = automl_v1.ImportDataRequest( + name="name_value", + input_config=input_config, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.ImportDataRequest, dict]): + The request object. Request message for + [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. + name (str): + Required. Dataset name. Dataset must + already exist. All imported annotations + and examples will be added. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (google.cloud.automl_v1.types.InputConfig): + Required. The desired input location + and its domain specific semantics, if + any. + + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ImportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ImportDataRequest): + request = service.ImportDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def export_data(self, + request: Optional[Union[service.ExportDataRequest, dict]] = None, + *, + name: Optional[str] = None, + output_config: Optional[io.OutputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_export_data(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + output_config = automl_v1.OutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.ExportDataRequest( + name="name_value", + output_config=output_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.ExportDataRequest, dict]): + The request object. Request message for + [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. + name (str): + Required. The resource name of the + dataset. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.automl_v1.types.OutputConfig): + Required. The desired output + location. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ExportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ExportDataRequest): + request = service.ExportDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
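        # Editorial sketch (not generated code): from the caller's side, a
        # flattened call with a Cloud Storage destination might look like the
        # following (bucket prefix and dataset name are placeholders, and
        # ``from google.cloud import automl_v1`` is assumed); ``operation.result()``
        # blocks until the export finishes:
        #
        #     output_config = automl_v1.OutputConfig()
        #     output_config.gcs_destination.output_uri_prefix = "gs://my-bucket/exports/"
        #     operation = client.export_data(name=dataset_name, output_config=output_config)
        #     operation.result()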
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_annotation_spec(self, + request: Optional[Union[service.GetAnnotationSpecRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an annotation spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_get_annotation_spec(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_annotation_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.GetAnnotationSpecRequest, dict]): + The request object. Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. + name (str): + Required. The resource name of the + annotation spec to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.AnnotationSpec: + A definition of an annotation spec. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetAnnotationSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetAnnotationSpecRequest): + request = service.GetAnnotationSpecRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
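        # Editorial sketch (not generated code): the resource name is typically
        # built with the path helper defined above (all segments below are
        # placeholder values):
        #
        #     name = client.annotation_spec_path(
        #         "my-project", "us-central1", "my-dataset-id", "my-spec-id")
        #     spec = client.get_annotation_spec(name=name)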
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_model(self, + request: Optional[Union[service.CreateModelRequest, dict]] = None, + *, + parent: Optional[str] = None, + model: Optional[gca_model.Model] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_create_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.CreateModelRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.CreateModelRequest, dict]): + The request object. Request message for + [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. + parent (str): + Required. Resource name of the parent + project where the model is being + created. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (google.cloud.automl_v1.types.Model): + Required. The model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.automl_v1.types.Model` API proto + representing a trained machine learning model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.CreateModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
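        # Editorial sketch (not generated code): a flattened call might build the
        # parent with ``common_location_path`` and wait on the returned operation
        # (values are placeholders; ``model`` is a pre-built ``Model`` message):
        #
        #     parent = client.common_location_path("my-project", "us-central1")
        #     operation = client.create_model(parent=parent, model=model)
        #     created_model = operation.result()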
+ if not isinstance(request, service.CreateModelRequest): + request = service.CreateModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gca_model.Model, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_model(self, + request: Optional[Union[service.GetModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_get_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.GetModelRequest, dict]): + The request object. Request message for + [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. + name (str): + Required. Resource name of the model. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.Model: + API proto representing a trained + machine learning model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
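        # Editorial sketch (not generated code): the model resource name is
        # typically built with the path helper defined above (placeholder values):
        #
        #     name = client.model_path("my-project", "us-central1", "my-model-id")
        #     model = client.get_model(name=name)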
+ if not isinstance(request, service.GetModelRequest): + request = service.GetModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_models(self, + request: Optional[Union[service.ListModelsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsPager: + r"""Lists models. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_list_models(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.ListModelsRequest, dict]): + The request object. Request message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + parent (str): + Required. Resource name of the + project, from which to list the models. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.services.auto_ml.pagers.ListModelsPager: + Response message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
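        # Editorial sketch (not generated code): iterating the returned pager
        # yields ``Model`` objects and fetches additional pages transparently
        # (placeholder values):
        #
        #     parent = client.common_location_path("my-project", "us-central1")
        #     for model in client.list_models(parent=parent):
        #         print(model.name)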
+ if not isinstance(request, service.ListModelsRequest): + request = service.ListModelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_models] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_model(self, + request: Optional[Union[service.DeleteModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_delete_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.DeleteModelRequest, dict]): + The request object. Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. + name (str): + Required. Resource name of the model + being deleted. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.DeleteModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.DeleteModelRequest): + request = service.DeleteModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def update_model(self, + request: Optional[Union[service.UpdateModelRequest, dict]] = None, + *, + model: Optional[gca_model.Model] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_update_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.UpdateModelRequest( + ) + + # Make the request + response = client.update_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.UpdateModelRequest, dict]): + The request object. Request message for + [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] + model (google.cloud.automl_v1.types.Model): + Required. The model which replaces + the resource on the server. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.automl_v1.types.Model:
+                API proto representing a trained
+                machine learning model.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([model, update_mask])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a service.UpdateModelRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, service.UpdateModelRequest):
+            request = service.UpdateModelRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if model is not None:
+                request.model = model
+            if update_mask is not None:
+                request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.update_model]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("model.name", request.model.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def deploy_model(self,
+            request: Optional[Union[service.DeployModelRequest, dict]] = None,
+            *,
+            name: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation.Operation:
+        r"""Deploys a model. If a model is already deployed, deploying it
+        with the same parameters has no effect. Deploying with different
+        parameters (e.g., changing
+        [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
+        will reset the deployment state without pausing the model's
+        availability.
+
+        Only applicable for Text Classification, Image Object Detection,
+        Tables, and Image Segmentation; all other domains manage
+        deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_deploy_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.DeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.DeployModelRequest, dict]): + The request object. Request message for + [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. + name (str): + Required. Resource name of the model + to deploy. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.DeployModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.DeployModelRequest): + request = service.DeployModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.deploy_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. 
+ return response + + def undeploy_model(self, + request: Optional[Union[service.UndeployModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Undeploys a model. If the model is not deployed this method has + no effect. + + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_undeploy_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.UndeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.UndeployModelRequest, dict]): + The request object. Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. + name (str): + Required. Resource name of the model + to undeploy. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.UndeployModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.UndeployModelRequest): + request = service.UndeployModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.undeploy_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def export_model(self, + request: Optional[Union[service.ExportModelRequest, dict]] = None, + *, + name: Optional[str] = None, + output_config: Optional[io.ModelExportOutputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Exports a trained, "export-able", model to a user specified + Google Cloud Storage location. A model is considered export-able + if and only if it has an export format defined for it in + [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_export_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + output_config = automl_v1.ModelExportOutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.ExportModelRequest( + name="name_value", + output_config=output_config, + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.ExportModelRequest, dict]): + The request object. Request message for + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an + error code will be returned. + name (str): + Required. The resource name of the + model to export. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.automl_v1.types.ModelExportOutputConfig): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ExportModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ExportModelRequest): + request = service.ExportModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_model_evaluation(self, + request: Optional[Union[service.GetModelEvaluationRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a model evaluation. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_get_model_evaluation(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.GetModelEvaluationRequest, dict]): + The request object. 
Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. + name (str): + Required. Resource name for the model + evaluation. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.ModelEvaluation: + Evaluation results of a model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetModelEvaluationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetModelEvaluationRequest): + request = service.GetModelEvaluationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_evaluations(self, + request: Optional[Union[service.ListModelEvaluationsRequest, dict]] = None, + *, + parent: Optional[str] = None, + filter: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsPager: + r"""Lists model evaluations. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_list_model_evaluations(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.ListModelEvaluationsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.ListModelEvaluationsRequest, dict]): + The request object. Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. 
+ parent (str): + Required. Resource name of the model + to list the model evaluations for. If + modelId is set as "-", this will list + model evaluations from across all models + of the parent location. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (str): + Required. An expression for filtering the results of the + request. + + - ``annotation_spec_id`` - for =, != or existence. See + example below for the last. + + Some examples of using the filter are: + + - ``annotation_spec_id!=4`` --> The model evaluation + was done for annotation spec with ID different than + 4. + - ``NOT annotation_spec_id:*`` --> The model evaluation + was done for aggregate of all annotation specs. + + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsPager: + Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, filter]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListModelEvaluationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListModelEvaluationsRequest): + request = service.ListModelEvaluationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelEvaluationsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "AutoMlClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
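+
+        For example (an illustrative sketch only; the resource name below is a
+        placeholder), the client can be used as a context manager so the
+        transport is closed automatically on exit:
+
+        .. code-block:: python
+
+            from google.cloud import automl_v1
+
+            with automl_v1.AutoMlClient() as client:
+                model = client.get_model(
+                    name="projects/PROJECT_ID/locations/us-central1/models/MODEL_ID",
+                )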
+ """ + self.transport.close() + + + + + + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "AutoMlClient", +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/pagers.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/pagers.py new file mode 100644 index 00000000..017e6bd6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/pagers.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import service + + +class ListDatasetsPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1.types.ListDatasetsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., service.ListDatasetsResponse], + request: service.ListDatasetsRequest, + response: service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.automl_v1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[dataset.Dataset]: + for page in self.pages: + yield from page.datasets + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDatasetsAsyncPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1.types.ListDatasetsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[service.ListDatasetsResponse]], + request: service.ListDatasetsRequest, + response: service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.automl_v1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[dataset.Dataset]: + async def async_generator(): + async for page in self.pages: + for response in page.datasets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelsPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1.types.ListModelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``model`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1.types.ListModelsResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., service.ListModelsResponse], + request: service.ListModelsRequest, + response: service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1.types.ListModelsRequest): + The initial request object. + response (google.cloud.automl_v1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model.Model]: + for page in self.pages: + yield from page.model + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelsAsyncPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1.types.ListModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``model`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[service.ListModelsResponse]], + request: service.ListModelsRequest, + response: service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1.types.ListModelsRequest): + The initial request object. + response (google.cloud.automl_v1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.model: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluation`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluation`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., service.ListModelEvaluationsResponse], + request: service.ListModelEvaluationsRequest, + response: service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.automl_v1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_evaluation.ModelEvaluation]: + for page in self.pages: + yield from page.model_evaluation + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsAsyncPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluation`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluation`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[service.ListModelEvaluationsResponse]], + request: service.ListModelEvaluationsRequest, + response: service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.automl_v1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model_evaluation.ModelEvaluation]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluation: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/__init__.py new file mode 100644 index 00000000..9d86479d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import AutoMlTransport +from .grpc import AutoMlGrpcTransport +from .grpc_asyncio import AutoMlGrpcAsyncIOTransport +from .rest import AutoMlRestTransport +from .rest import AutoMlRestInterceptor + + +# Compile a registry of transports. 
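+# The registry below maps a transport name to its implementation class; a
+# client constructed with, e.g., ``transport="rest"`` would typically be
+# routed to ``AutoMlRestTransport`` through this mapping.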
+_transport_registry = OrderedDict() # type: Dict[str, Type[AutoMlTransport]] +_transport_registry['grpc'] = AutoMlGrpcTransport +_transport_registry['grpc_asyncio'] = AutoMlGrpcAsyncIOTransport +_transport_registry['rest'] = AutoMlRestTransport + +__all__ = ( + 'AutoMlTransport', + 'AutoMlGrpcTransport', + 'AutoMlGrpcAsyncIOTransport', + 'AutoMlRestTransport', + 'AutoMlRestInterceptor', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/base.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/base.py new file mode 100644 index 00000000..8c7d2bf9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/base.py @@ -0,0 +1,462 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.automl_v1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.automl_v1.types import annotation_spec +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import dataset as gca_dataset +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model as gca_model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import service +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +class AutoMlTransport(abc.ABC): + """Abstract transport class for AutoMl.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'automl.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience(api_audience if api_audience else host) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
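+        # Each RPC below is wrapped with a default timeout and, where
+        # configured, a default exponential-backoff retry on
+        # DeadlineExceeded / ServiceUnavailable; the per-call ``retry`` and
+        # ``timeout`` arguments accepted by the client methods override
+        # these defaults.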
+ self._wrapped_methods = { + self.create_dataset: gapic_v1.method.wrap_method( + self.create_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.get_dataset: gapic_v1.method.wrap_method( + self.get_dataset, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_datasets: gapic_v1.method.wrap_method( + self.list_datasets, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.update_dataset: gapic_v1.method.wrap_method( + self.update_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_dataset: gapic_v1.method.wrap_method( + self.delete_dataset, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.import_data: gapic_v1.method.wrap_method( + self.import_data, + default_timeout=5.0, + client_info=client_info, + ), + self.export_data: gapic_v1.method.wrap_method( + self.export_data, + default_timeout=5.0, + client_info=client_info, + ), + self.get_annotation_spec: gapic_v1.method.wrap_method( + self.get_annotation_spec, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.create_model: gapic_v1.method.wrap_method( + self.create_model, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model: gapic_v1.method.wrap_method( + self.get_model, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_models: gapic_v1.method.wrap_method( + self.list_models, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.delete_model: gapic_v1.method.wrap_method( + self.delete_model, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.update_model: gapic_v1.method.wrap_method( + self.update_model, + default_timeout=5.0, + client_info=client_info, + ), + self.deploy_model: gapic_v1.method.wrap_method( + self.deploy_model, + default_timeout=5.0, + client_info=client_info, + ), + self.undeploy_model: gapic_v1.method.wrap_method( + self.undeploy_model, + default_timeout=5.0, + client_info=client_info, + ), + self.export_model: gapic_v1.method.wrap_method( + self.export_model, + default_timeout=5.0, + client_info=client_info, + ), + 
self.get_model_evaluation: gapic_v1.method.wrap_method( + self.get_model_evaluation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_evaluations: gapic_v1.method.wrap_method( + self.list_model_evaluations, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_dataset(self) -> Callable[ + [service.CreateDatasetRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_dataset(self) -> Callable[ + [service.GetDatasetRequest], + Union[ + dataset.Dataset, + Awaitable[dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def list_datasets(self) -> Callable[ + [service.ListDatasetsRequest], + Union[ + service.ListDatasetsResponse, + Awaitable[service.ListDatasetsResponse] + ]]: + raise NotImplementedError() + + @property + def update_dataset(self) -> Callable[ + [service.UpdateDatasetRequest], + Union[ + gca_dataset.Dataset, + Awaitable[gca_dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def delete_dataset(self) -> Callable[ + [service.DeleteDatasetRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def import_data(self) -> Callable[ + [service.ImportDataRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_data(self) -> Callable[ + [service.ExportDataRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_annotation_spec(self) -> Callable[ + [service.GetAnnotationSpecRequest], + Union[ + annotation_spec.AnnotationSpec, + Awaitable[annotation_spec.AnnotationSpec] + ]]: + raise NotImplementedError() + + @property + def create_model(self) -> Callable[ + [service.CreateModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_model(self) -> Callable[ + [service.GetModelRequest], + Union[ + model.Model, + Awaitable[model.Model] + ]]: + raise NotImplementedError() + + @property + def list_models(self) -> Callable[ + [service.ListModelsRequest], + Union[ + service.ListModelsResponse, + Awaitable[service.ListModelsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_model(self) -> Callable[ + [service.DeleteModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def update_model(self) -> Callable[ + [service.UpdateModelRequest], + Union[ + gca_model.Model, + 
Awaitable[gca_model.Model] + ]]: + raise NotImplementedError() + + @property + def deploy_model(self) -> Callable[ + [service.DeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def undeploy_model(self) -> Callable[ + [service.UndeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_model(self) -> Callable[ + [service.ExportModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_model_evaluation(self) -> Callable[ + [service.GetModelEvaluationRequest], + Union[ + model_evaluation.ModelEvaluation, + Awaitable[model_evaluation.ModelEvaluation] + ]]: + raise NotImplementedError() + + @property + def list_model_evaluations(self) -> Callable[ + [service.ListModelEvaluationsRequest], + Union[ + service.ListModelEvaluationsResponse, + Awaitable[service.ListModelEvaluationsResponse] + ]]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'AutoMlTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc.py new file mode 100644 index 00000000..c77a051e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc.py @@ -0,0 +1,796 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.automl_v1.types import annotation_spec +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import dataset as gca_dataset +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model as gca_model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import service +from google.longrunning import operations_pb2 # type: ignore +from .base import AutoMlTransport, DEFAULT_CLIENT_INFO + + +class AutoMlGrpcTransport(AutoMlTransport): + """gRPC backend transport for AutoMl. + + AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. 
+ For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or dash-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. 
These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [service.CreateDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/CreateDataset', + request_serializer=service.CreateDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [service.GetDatasetRequest], + dataset.Dataset]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a dataset. + + Returns: + Callable[[~.GetDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/GetDataset', + request_serializer=service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def list_datasets(self) -> Callable[ + [service.ListDatasetsRequest], + service.ListDatasetsResponse]: + r"""Return a callable for the list datasets method over gRPC. + + Lists datasets in a project. + + Returns: + Callable[[~.ListDatasetsRequest], + ~.ListDatasetsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ListDatasets', + request_serializer=service.ListDatasetsRequest.serialize, + response_deserializer=service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def update_dataset(self) -> Callable[ + [service.UpdateDatasetRequest], + gca_dataset.Dataset]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/UpdateDataset', + request_serializer=service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def delete_dataset(self) -> Callable[ + [service.DeleteDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/DeleteDataset', + request_serializer=service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [service.ImportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Returns: + Callable[[~.ImportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
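+        # Illustrative note (not part of the generated surface): the callable
+        # returned here can be invoked directly with a request message, e.g.
+        #   transport.import_data(service.ImportDataRequest(name=dataset_name))
+        # where ``dataset_name`` is a placeholder; most callers instead go
+        # through AutoMlClient.import_data(), which layers retries, timeouts,
+        # and long-running-operation handling on top of this raw stub.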
+ if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ImportData', + request_serializer=service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [service.ExportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the export data method over gRPC. + + Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ExportData', + request_serializer=service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def get_annotation_spec(self) -> Callable[ + [service.GetAnnotationSpecRequest], + annotation_spec.AnnotationSpec]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an annotation spec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + ~.AnnotationSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_annotation_spec' not in self._stubs: + self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/GetAnnotationSpec', + request_serializer=service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs['get_annotation_spec'] + + @property + def create_model(self) -> Callable[ + [service.CreateModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the create model method over gRPC. + + Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Returns: + Callable[[~.CreateModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_model' not in self._stubs: + self._stubs['create_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/CreateModel', + request_serializer=service.CreateModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_model'] + + @property + def get_model(self) -> Callable[ + [service.GetModelRequest], + model.Model]: + r"""Return a callable for the get model method over gRPC. + + Gets a model. 
+ + Returns: + Callable[[~.GetModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/GetModel', + request_serializer=service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [service.ListModelsRequest], + service.ListModelsResponse]: + r"""Return a callable for the list models method over gRPC. + + Lists models. + + Returns: + Callable[[~.ListModelsRequest], + ~.ListModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ListModels', + request_serializer=service.ListModelsRequest.serialize, + response_deserializer=service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def delete_model(self) -> Callable[ + [service.DeleteModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model' not in self._stubs: + self._stubs['delete_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/DeleteModel', + request_serializer=service.DeleteModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model'] + + @property + def update_model(self) -> Callable[ + [service.UpdateModelRequest], + gca_model.Model]: + r"""Return a callable for the update model method over gRPC. + + Updates a model. + + Returns: + Callable[[~.UpdateModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_model' not in self._stubs: + self._stubs['update_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/UpdateModel', + request_serializer=service.UpdateModelRequest.serialize, + response_deserializer=gca_model.Model.deserialize, + ) + return self._stubs['update_model'] + + @property + def deploy_model(self) -> Callable[ + [service.DeployModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a model. 
If a model is already deployed, deploying it
+        with the same parameters has no effect. Deploying with different
+        parameters (as e.g. changing
+        [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
+        will reset the deployment state without pausing the model's
+        availability.
+
+        Only applicable for Text Classification, Image Object Detection,
+        Tables, and Image Segmentation; all other domains manage
+        deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.DeployModelRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'deploy_model' not in self._stubs:
+            self._stubs['deploy_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1.AutoMl/DeployModel',
+                request_serializer=service.DeployModelRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['deploy_model']
+
+    @property
+    def undeploy_model(self) -> Callable[
+            [service.UndeployModelRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the undeploy model method over gRPC.
+
+        Undeploys a model. If the model is not deployed, this method has
+        no effect.
+
+        Only applicable for Text Classification, Image Object Detection
+        and Tables; all other domains manage deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.UndeployModelRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'undeploy_model' not in self._stubs:
+            self._stubs['undeploy_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1.AutoMl/UndeployModel',
+                request_serializer=service.UndeployModelRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['undeploy_model']
+
+    @property
+    def export_model(self) -> Callable[
+            [service.ExportModelRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the export model method over gRPC.
+
+        Exports a trained, "export-able", model to a user-specified
+        Google Cloud Storage location. A model is considered export-able
+        if and only if it has an export format defined for it in
+        [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.ExportModelRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
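+        # Note: the stub is created lazily on the first property access and
+        # cached in ``self._stubs``, so later accesses reuse the same gRPC
+        # callable rather than re-creating it.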
+ if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ExportModel', + request_serializer=service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def get_model_evaluation(self) -> Callable[ + [service.GetModelEvaluationRequest], + model_evaluation.ModelEvaluation]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a model evaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/GetModelEvaluation', + request_serializer=service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [service.ListModelEvaluationsRequest], + service.ListModelEvaluationsResponse]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists model evaluations. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + ~.ListModelEvaluationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ListModelEvaluations', + request_serializer=service.ListModelEvaluationsRequest.serialize, + response_deserializer=service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'AutoMlGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py new file mode 100644 index 00000000..0d68fd82 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py @@ -0,0 +1,795 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.automl_v1.types import annotation_spec +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import dataset as gca_dataset +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model as gca_model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import service +from google.longrunning import operations_pb2 # type: ignore +from .base import AutoMlTransport, DEFAULT_CLIENT_INFO +from .grpc import AutoMlGrpcTransport + + +class AutoMlGrpcAsyncIOTransport(AutoMlTransport): + """gRPC AsyncIO backend transport for AutoMl. + + AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. + For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or dash-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [service.CreateDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/CreateDataset', + request_serializer=service.CreateDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [service.GetDatasetRequest], + Awaitable[dataset.Dataset]]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a dataset. + + Returns: + Callable[[~.GetDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/GetDataset', + request_serializer=service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def list_datasets(self) -> Callable[ + [service.ListDatasetsRequest], + Awaitable[service.ListDatasetsResponse]]: + r"""Return a callable for the list datasets method over gRPC. + + Lists datasets in a project. + + Returns: + Callable[[~.ListDatasetsRequest], + Awaitable[~.ListDatasetsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ListDatasets', + request_serializer=service.ListDatasetsRequest.serialize, + response_deserializer=service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def update_dataset(self) -> Callable[ + [service.UpdateDatasetRequest], + Awaitable[gca_dataset.Dataset]]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/UpdateDataset', + request_serializer=service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def delete_dataset(self) -> Callable[ + [service.DeleteDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/DeleteDataset', + request_serializer=service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [service.ImportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Returns: + Callable[[~.ImportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ImportData', + request_serializer=service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [service.ExportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export data method over gRPC. + + Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ExportData', + request_serializer=service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def get_annotation_spec(self) -> Callable[ + [service.GetAnnotationSpecRequest], + Awaitable[annotation_spec.AnnotationSpec]]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an annotation spec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + Awaitable[~.AnnotationSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_annotation_spec' not in self._stubs: + self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/GetAnnotationSpec', + request_serializer=service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs['get_annotation_spec'] + + @property + def create_model(self) -> Callable[ + [service.CreateModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create model method over gRPC. + + Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Returns: + Callable[[~.CreateModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_model' not in self._stubs: + self._stubs['create_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/CreateModel', + request_serializer=service.CreateModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_model'] + + @property + def get_model(self) -> Callable[ + [service.GetModelRequest], + Awaitable[model.Model]]: + r"""Return a callable for the get model method over gRPC. + + Gets a model. + + Returns: + Callable[[~.GetModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/GetModel', + request_serializer=service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [service.ListModelsRequest], + Awaitable[service.ListModelsResponse]]: + r"""Return a callable for the list models method over gRPC. + + Lists models. + + Returns: + Callable[[~.ListModelsRequest], + Awaitable[~.ListModelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ListModels', + request_serializer=service.ListModelsRequest.serialize, + response_deserializer=service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def delete_model(self) -> Callable[ + [service.DeleteModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a model. 
Returns ``google.protobuf.Empty`` in the
+        [response][google.longrunning.Operation.response] field when it
+        completes, and ``delete_details`` in the
+        [metadata][google.longrunning.Operation.metadata] field.
+
+        Returns:
+            Callable[[~.DeleteModelRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_model' not in self._stubs:
+            self._stubs['delete_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1.AutoMl/DeleteModel',
+                request_serializer=service.DeleteModelRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_model']
+
+    @property
+    def update_model(self) -> Callable[
+            [service.UpdateModelRequest],
+            Awaitable[gca_model.Model]]:
+        r"""Return a callable for the update model method over gRPC.
+
+        Updates a model.
+
+        Returns:
+            Callable[[~.UpdateModelRequest],
+                    Awaitable[~.Model]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'update_model' not in self._stubs:
+            self._stubs['update_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1.AutoMl/UpdateModel',
+                request_serializer=service.UpdateModelRequest.serialize,
+                response_deserializer=gca_model.Model.deserialize,
+            )
+        return self._stubs['update_model']
+
+    @property
+    def deploy_model(self) -> Callable[
+            [service.DeployModelRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the deploy model method over gRPC.
+
+        Deploys a model. If a model is already deployed, deploying it
+        with the same parameters has no effect. Deploying with different
+        parameters (as e.g. changing
+        [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number])
+        will reset the deployment state without pausing the model's
+        availability.
+
+        Only applicable for Text Classification, Image Object Detection,
+        Tables, and Image Segmentation; all other domains manage
+        deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.DeployModelRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'deploy_model' not in self._stubs:
+            self._stubs['deploy_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1.AutoMl/DeployModel',
+                request_serializer=service.DeployModelRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['deploy_model']
+
+    @property
+    def undeploy_model(self) -> Callable[
+            [service.UndeployModelRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the undeploy model method over gRPC.
+
+        Undeploys a model. If the model is not deployed, this method has
+        no effect.
+ + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.UndeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undeploy_model' not in self._stubs: + self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/UndeployModel', + request_serializer=service.UndeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_model'] + + @property + def export_model(self) -> Callable[ + [service.ExportModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, "export-able", model to a user specified + Google Cloud Storage location. A model is considered export-able + if and only if it has an export format defined for it in + [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ExportModel', + request_serializer=service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def get_model_evaluation(self) -> Callable[ + [service.GetModelEvaluationRequest], + Awaitable[model_evaluation.ModelEvaluation]]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a model evaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/GetModelEvaluation', + request_serializer=service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [service.ListModelEvaluationsRequest], + Awaitable[service.ListModelEvaluationsResponse]]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists model evaluations. 
+ + Returns: + Callable[[~.ListModelEvaluationsRequest], + Awaitable[~.ListModelEvaluationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.AutoMl/ListModelEvaluations', + request_serializer=service.ListModelEvaluationsRequest.serialize, + response_deserializer=service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ( + 'AutoMlGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/rest.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/rest.py new file mode 100644 index 00000000..017da644 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/rest.py @@ -0,0 +1,2366 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.cloud.automl_v1.types import annotation_spec +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import dataset as gca_dataset +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model as gca_model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import service +from google.longrunning import operations_pb2 # type: ignore + +from .base import AutoMlTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class AutoMlRestInterceptor: + """Interceptor for AutoMl. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the AutoMlRestTransport. + + .. 
code-block:: python + class MyCustomAutoMlInterceptor(AutoMlRestInterceptor): + def pre_create_dataset(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_dataset(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_dataset(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_dataset(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_deploy_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_deploy_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_export_data(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_export_data(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_export_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_export_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_annotation_spec(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_annotation_spec(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_dataset(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_dataset(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_model_evaluation(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_model_evaluation(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_import_data(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_import_data(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_datasets(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_datasets(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_model_evaluations(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_model_evaluations(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_models(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_models(self, response): + 
logging.log(f"Received response: {response}") + return response + + def pre_undeploy_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_undeploy_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_dataset(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_dataset(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_model(self, response): + logging.log(f"Received response: {response}") + return response + + transport = AutoMlRestTransport(interceptor=MyCustomAutoMlInterceptor()) + client = AutoMlClient(transport=transport) + + + """ + def pre_create_dataset(self, request: service.CreateDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.CreateDatasetRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_dataset + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_create_dataset(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for create_dataset + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_create_model(self, request: service.CreateModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.CreateModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_create_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for create_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_delete_dataset(self, request: service.DeleteDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeleteDatasetRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_dataset + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_delete_dataset(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_dataset + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_delete_model(self, request: service.DeleteModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeleteModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_delete_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. 
+ """ + return response + def pre_deploy_model(self, request: service.DeployModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeployModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for deploy_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_deploy_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for deploy_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_export_data(self, request: service.ExportDataRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ExportDataRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for export_data + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_export_data(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for export_data + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_export_model(self, request: service.ExportModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ExportModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for export_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_export_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for export_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_get_annotation_spec(self, request: service.GetAnnotationSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetAnnotationSpecRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_annotation_spec + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_get_annotation_spec(self, response: annotation_spec.AnnotationSpec) -> annotation_spec.AnnotationSpec: + """Post-rpc interceptor for get_annotation_spec + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_get_dataset(self, request: service.GetDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetDatasetRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_dataset + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_get_dataset(self, response: dataset.Dataset) -> dataset.Dataset: + """Post-rpc interceptor for get_dataset + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. 
+ """ + return response + def pre_get_model(self, request: service.GetModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_get_model(self, response: model.Model) -> model.Model: + """Post-rpc interceptor for get_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_get_model_evaluation(self, request: service.GetModelEvaluationRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetModelEvaluationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_model_evaluation + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_get_model_evaluation(self, response: model_evaluation.ModelEvaluation) -> model_evaluation.ModelEvaluation: + """Post-rpc interceptor for get_model_evaluation + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_import_data(self, request: service.ImportDataRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ImportDataRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for import_data + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_import_data(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for import_data + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_list_datasets(self, request: service.ListDatasetsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListDatasetsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_datasets + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_list_datasets(self, response: service.ListDatasetsResponse) -> service.ListDatasetsResponse: + """Post-rpc interceptor for list_datasets + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_list_model_evaluations(self, request: service.ListModelEvaluationsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListModelEvaluationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_model_evaluations + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_list_model_evaluations(self, response: service.ListModelEvaluationsResponse) -> service.ListModelEvaluationsResponse: + """Post-rpc interceptor for list_model_evaluations + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. 
+ """ + return response + def pre_list_models(self, request: service.ListModelsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListModelsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_models + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_list_models(self, response: service.ListModelsResponse) -> service.ListModelsResponse: + """Post-rpc interceptor for list_models + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_undeploy_model(self, request: service.UndeployModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UndeployModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for undeploy_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_undeploy_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for undeploy_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_update_dataset(self, request: service.UpdateDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UpdateDatasetRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_dataset + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_update_dataset(self, response: gca_dataset.Dataset) -> gca_dataset.Dataset: + """Post-rpc interceptor for update_dataset + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_update_model(self, request: service.UpdateModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UpdateModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_update_model(self, response: gca_model.Model) -> gca_model.Model: + """Post-rpc interceptor for update_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class AutoMlRestStub: + _session: AuthorizedSession + _host: str + _interceptor: AutoMlRestInterceptor + + +class AutoMlRestTransport(AutoMlTransport): + """REST backend transport for AutoMl. + + AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. + For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or dash-case, either of those cases is accepted. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__(self, *, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[ + ], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = 'https', + interceptor: Optional[AutoMlRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or AutoMlRestInterceptor() + self._prep_wrapped_messages(client_info) + + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations.
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. + if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + 'google.longrunning.Operations.CancelOperation': [ + { + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/operations/*}:cancel', + 'body': '*', + }, + ], + 'google.longrunning.Operations.DeleteOperation': [ + { + 'method': 'delete', + 'uri': '/v1/{name=projects/*/locations/*/operations/*}', + }, + ], + 'google.longrunning.Operations.GetOperation': [ + { + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*/operations/*}', + }, + ], + 'google.longrunning.Operations.ListOperations': [ + { + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*}/operations', + }, + ], + 'google.longrunning.Operations.WaitOperation': [ + { + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/operations/*}:wait', + 'body': '*', + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1") + + self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) + + # Return the client from cache. + return self._operations_client + + class _CreateDataset(AutoMlRestStub): + def __hash__(self): + return hash("CreateDataset") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.CreateDatasetRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the create dataset method over HTTP. + + Args: + request (~.service.CreateDatasetRequest): + The request object. Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{parent=projects/*/locations/*}/datasets', + 'body': 'dataset', + }, + ] + request, metadata = self._interceptor.pre_create_dataset(request, metadata) + pb_request = service.CreateDatasetRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_dataset(resp) + return resp + + class _CreateModel(AutoMlRestStub): + def __hash__(self): + return hash("CreateModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.CreateModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the create model method over HTTP. + + Args: + request (~.service.CreateModelRequest): + The request object. Request message for + [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{parent=projects/*/locations/*}/models', + 'body': 'model', + }, + ] + request, metadata = self._interceptor.pre_create_model(request, metadata) + pb_request = service.CreateModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_model(resp) + return resp + + class _DeleteDataset(AutoMlRestStub): + def __hash__(self): + return hash("DeleteDataset") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.DeleteDatasetRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the delete dataset method over HTTP. + + Args: + request (~.service.DeleteDatasetRequest): + The request object. Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v1/{name=projects/*/locations/*/datasets/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_dataset(request, metadata) + pb_request = service.DeleteDatasetRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_dataset(resp) + return resp + + class _DeleteModel(AutoMlRestStub): + def __hash__(self): + return hash("DeleteModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.DeleteModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the delete model method over HTTP. + + Args: + request (~.service.DeleteModelRequest): + The request object. Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v1/{name=projects/*/locations/*/models/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_model(request, metadata) + pb_request = service.DeleteModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_model(resp) + return resp + + class _DeployModel(AutoMlRestStub): + def __hash__(self): + return hash("DeployModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.DeployModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the deploy model method over HTTP. + + Args: + request (~.service.DeployModelRequest): + The request object. Request message for + [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/models/*}:deploy', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_deploy_model(request, metadata) + pb_request = service.DeployModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_deploy_model(resp) + return resp + + class _ExportData(AutoMlRestStub): + def __hash__(self): + return hash("ExportData") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ExportDataRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the export data method over HTTP. + + Args: + request (~.service.ExportDataRequest): + The request object. Request message for + [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/datasets/*}:exportData', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_export_data(request, metadata) + pb_request = service.ExportDataRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_export_data(resp) + return resp + + class _ExportModel(AutoMlRestStub): + def __hash__(self): + return hash("ExportModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ExportModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the export model method over HTTP. + + Args: + request (~.service.ExportModelRequest): + The request object. Request message for + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an + error code will be returned. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/models/*}:export', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_export_model(request, metadata) + pb_request = service.ExportModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_export_model(resp) + return resp + + class _GetAnnotationSpec(AutoMlRestStub): + def __hash__(self): + return hash("GetAnnotationSpec") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.GetAnnotationSpecRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> annotation_spec.AnnotationSpec: + r"""Call the get annotation spec method over HTTP. + + Args: + request (~.service.GetAnnotationSpecRequest): + The request object. Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.annotation_spec.AnnotationSpec: + A definition of an annotation spec. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}', + }, + ] + request, metadata = self._interceptor.pre_get_annotation_spec(request, metadata) + pb_request = service.GetAnnotationSpecRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = annotation_spec.AnnotationSpec() + pb_resp = annotation_spec.AnnotationSpec.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_annotation_spec(resp) + return resp + + class _GetDataset(AutoMlRestStub): + def __hash__(self): + return hash("GetDataset") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.GetDatasetRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> dataset.Dataset: + r"""Call the get dataset method over HTTP. + + Args: + request (~.service.GetDatasetRequest): + The request object. Request message for + [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*/datasets/*}', + }, + ] + request, metadata = self._interceptor.pre_get_dataset(request, metadata) + pb_request = service.GetDatasetRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = dataset.Dataset() + pb_resp = dataset.Dataset.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_dataset(resp) + return resp + + class _GetModel(AutoMlRestStub): + def __hash__(self): + return hash("GetModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.GetModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> model.Model: + r"""Call the get model method over HTTP. + + Args: + request (~.service.GetModelRequest): + The request object. Request message for + [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model.Model: + API proto representing a trained + machine learning model. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*/models/*}', + }, + ] + request, metadata = self._interceptor.pre_get_model(request, metadata) + pb_request = service.GetModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model.Model() + pb_resp = model.Model.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model(resp) + return resp + + class _GetModelEvaluation(AutoMlRestStub): + def __hash__(self): + return hash("GetModelEvaluation") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.GetModelEvaluationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> model_evaluation.ModelEvaluation: + r"""Call the get model evaluation method over HTTP. + + Args: + request (~.service.GetModelEvaluationRequest): + The request object. Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_evaluation.ModelEvaluation: + Evaluation results of a model. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}', + }, + ] + request, metadata = self._interceptor.pre_get_model_evaluation(request, metadata) + pb_request = service.GetModelEvaluationRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_evaluation.ModelEvaluation() + pb_resp = model_evaluation.ModelEvaluation.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model_evaluation(resp) + return resp + + class _ImportData(AutoMlRestStub): + def __hash__(self): + return hash("ImportData") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ImportDataRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the import data method over HTTP. + + Args: + request (~.service.ImportDataRequest): + The request object. Request message for + [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/datasets/*}:importData', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_import_data(request, metadata) + pb_request = service.ImportDataRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_import_data(resp) + return resp + + class _ListDatasets(AutoMlRestStub): + def __hash__(self): + return hash("ListDatasets") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ListDatasetsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> service.ListDatasetsResponse: + r"""Call the list datasets method over HTTP. + + Args: + request (~.service.ListDatasetsRequest): + The request object. Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListDatasetsResponse: + Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{parent=projects/*/locations/*}/datasets', + }, + ] + request, metadata = self._interceptor.pre_list_datasets(request, metadata) + pb_request = service.ListDatasetsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListDatasetsResponse() + pb_resp = service.ListDatasetsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_datasets(resp) + return resp + + class _ListModelEvaluations(AutoMlRestStub): + def __hash__(self): + return hash("ListModelEvaluations") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "filter" : "", } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ListModelEvaluationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> service.ListModelEvaluationsResponse: + r"""Call the list model evaluations method over HTTP. + + Args: + request (~.service.ListModelEvaluationsRequest): + The request object. Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListModelEvaluationsResponse: + Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations', + }, + ] + request, metadata = self._interceptor.pre_list_model_evaluations(request, metadata) + pb_request = service.ListModelEvaluationsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListModelEvaluationsResponse() + pb_resp = service.ListModelEvaluationsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_model_evaluations(resp) + return resp + + class _ListModels(AutoMlRestStub): + def __hash__(self): + return hash("ListModels") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ListModelsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> service.ListModelsResponse: + r"""Call the list models method over HTTP. + + Args: + request (~.service.ListModelsRequest): + The request object. Request message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListModelsResponse: + Response message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{parent=projects/*/locations/*}/models', + }, + ] + request, metadata = self._interceptor.pre_list_models(request, metadata) + pb_request = service.ListModelsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListModelsResponse() + pb_resp = service.ListModelsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_models(resp) + return resp + + class _UndeployModel(AutoMlRestStub): + def __hash__(self): + return hash("UndeployModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.UndeployModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the undeploy model method over HTTP. + + Args: + request (~.service.UndeployModelRequest): + The request object. Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/models/*}:undeploy', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_undeploy_model(request, metadata) + pb_request = service.UndeployModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_undeploy_model(resp) + return resp + + class _UpdateDataset(AutoMlRestStub): + def __hash__(self): + return hash("UpdateDataset") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask" : {}, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.UpdateDatasetRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> gca_dataset.Dataset: + r"""Call the update dataset method over HTTP. + + Args: + request (~.service.UpdateDatasetRequest): + The request object. Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v1/{dataset.name=projects/*/locations/*/datasets/*}', + 'body': 'dataset', + }, + ] + request, metadata = self._interceptor.pre_update_dataset(request, metadata) + pb_request = service.UpdateDatasetRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_dataset.Dataset() + pb_resp = gca_dataset.Dataset.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_dataset(resp) + return resp + + class _UpdateModel(AutoMlRestStub): + def __hash__(self): + return hash("UpdateModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask" : {}, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.UpdateModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> gca_model.Model: + r"""Call the update model method over HTTP. + + Args: + request (~.service.UpdateModelRequest): + The request object. Request message for + [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_model.Model: + API proto representing a trained + machine learning model. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v1/{model.name=projects/*/locations/*/models/*}', + 'body': 'model', + }, + ] + request, metadata = self._interceptor.pre_update_model(request, metadata) + pb_request = service.UpdateModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_model.Model() + pb_resp = gca_model.Model.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_model(resp) + return resp + + @property + def create_dataset(self) -> Callable[ + [service.CreateDatasetRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateDataset(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_model(self) -> Callable[ + [service.CreateModelRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_dataset(self) -> Callable[ + [service.DeleteDatasetRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteDataset(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_model(self) -> Callable[ + [service.DeleteModelRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def deploy_model(self) -> Callable[ + [service.DeployModelRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._DeployModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def export_data(self) -> Callable[ + [service.ExportDataRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ExportData(self._session, self._host, self._interceptor) # type: ignore + + @property + def export_model(self) -> Callable[ + [service.ExportModelRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ExportModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_annotation_spec(self) -> Callable[ + [service.GetAnnotationSpecRequest], + annotation_spec.AnnotationSpec]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetAnnotationSpec(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_dataset(self) -> Callable[ + [service.GetDatasetRequest], + dataset.Dataset]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetDataset(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_model(self) -> Callable[ + [service.GetModelRequest], + model.Model]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_model_evaluation(self) -> Callable[ + [service.GetModelEvaluationRequest], + model_evaluation.ModelEvaluation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetModelEvaluation(self._session, self._host, self._interceptor) # type: ignore + + @property + def import_data(self) -> Callable[ + [service.ImportDataRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ImportData(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_datasets(self) -> Callable[ + [service.ListDatasetsRequest], + service.ListDatasetsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListDatasets(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_model_evaluations(self) -> Callable[ + [service.ListModelEvaluationsRequest], + service.ListModelEvaluationsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListModelEvaluations(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_models(self) -> Callable[ + [service.ListModelsRequest], + service.ListModelsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListModels(self._session, self._host, self._interceptor) # type: ignore + + @property + def undeploy_model(self) -> Callable[ + [service.UndeployModelRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UndeployModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_dataset(self) -> Callable[ + [service.UpdateDatasetRequest], + gca_dataset.Dataset]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateDataset(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_model(self) -> Callable[ + [service.UpdateModelRequest], + gca_model.Model]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__=( + 'AutoMlRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/__init__.py new file mode 100644 index 00000000..905b8c43 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import PredictionServiceClient +from .async_client import PredictionServiceAsyncClient + +__all__ = ( + 'PredictionServiceClient', + 'PredictionServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/async_client.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/async_client.py new file mode 100644 index 00000000..86342cd0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/async_client.py @@ -0,0 +1,656 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
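For orientation, a minimal usage sketch of the REST transport defined above (illustrative only, not part of the generated diff). It assumes the package is installed as google-cloud-automl and that Application Default Credentials are available; the transport selection and the flattened list_datasets argument come from the generated AutoMlClient surface.

    from google.cloud import automl_v1


    def list_datasets_over_rest(project_id: str, location: str = "us-central1") -> None:
        # transport="rest" routes calls through AutoMlRestTransport, so ListDatasets
        # is issued as GET /v1/{parent=projects/*/locations/*}/datasets.
        client = automl_v1.AutoMlClient(transport="rest")
        parent = f"projects/{project_id}/locations/{location}"
        for dataset in client.list_datasets(parent=parent):
            print(dataset.name)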
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union + +from google.cloud.automl_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.automl_v1.types import annotation_payload +from google.cloud.automl_v1.types import data_items +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import operations +from google.cloud.automl_v1.types import prediction_service +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .client import PredictionServiceClient + + +class PredictionServiceAsyncClient: + """AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or dash-case, either of those cases is accepted. + """ + + _client: PredictionServiceClient + + DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT + + model_path = staticmethod(PredictionServiceClient.model_path) + parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) + common_billing_account_path = staticmethod(PredictionServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) + common_project_path = staticmethod(PredictionServiceClient.common_project_path) + parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) + common_location_path = staticmethod(PredictionServiceClient.common_location_path) + parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. 
+ + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return PredictionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> PredictionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PredictionServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PredictionServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def predict(self, + request: Optional[Union[prediction_service.PredictRequest, dict]] = None, + *, + name: Optional[str] = None, + payload: Optional[data_items.ExamplePayload] = None, + params: Optional[MutableMapping[str, str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, + and their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. + + AutoML Natural Language Sentiment Analysis + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Translation + + - A TextSnippet up to 25,000 characters, UTF-8 encoded. + + AutoML Tables + + - A row with column values matching the columns of the model, + up to 5MB. Not available for FORECASTING ``prediction_type``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_predict(): + # Create a client + client = automl_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + payload = automl_v1.ExamplePayload() + payload.image.image_bytes = b'image_bytes_blob' + + request = automl_v1.PredictRequest( + name="name_value", + payload=payload, + ) + + # Make the request + response = await client.predict(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.PredictRequest, dict]]): + The request object. Request message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + name (:class:`str`): + Required. Name of the model requested + to serve the prediction. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + payload (:class:`google.cloud.automl_v1.types.ExamplePayload`): + Required. Payload to perform a + prediction on. The payload must match + the problem type that the model was + trained to solve. + + This corresponds to the ``payload`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (:class:`MutableMapping[str, str]`): + Additional domain-specific parameters, any string must + be up to 25000 characters long. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. + + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects + on the image, it will only produce bounding boxes which + have at least this confidence score. Value in 0 to 1 + range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned. The default is 100. The + number of returned bounding boxes might be limited by + the server. + + AutoML Tables + + ``feature_importance`` : (boolean) Whether + [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance] + is populated in the returned list of + [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation] + objects. The default is false. + + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name, payload, params]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.PredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if payload is not None: + request.payload = payload + + if params: + request.params.update(params) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.predict, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def batch_predict(self, + request: Optional[Union[prediction_service.BatchPredictRequest, dict]] = None, + *, + name: Optional[str] = None, + input_config: Optional[io.BatchPredictInputConfig] = None, + output_config: Optional[io.BatchPredictOutputConfig] = None, + params: Optional[MutableMapping[str, str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. + Available for following ML scenarios: + + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural + Language Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + async def sample_batch_predict(): + # Create a client + client = automl_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + input_config = automl_v1.BatchPredictInputConfig() + input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] + + output_config = automl_v1.BatchPredictOutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.BatchPredictRequest( + name="name_value", + input_config=input_config, + output_config=output_config, + ) + + # Make the request + operation = client.batch_predict(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1.types.BatchPredictRequest, dict]]): + The request object. Request message for + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + name (:class:`str`): + Required. Name of the model requested + to serve the batch prediction. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (:class:`google.cloud.automl_v1.types.BatchPredictInputConfig`): + Required. The input configuration for + batch prediction. + + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`google.cloud.automl_v1.types.BatchPredictOutputConfig`): + Required. The Configuration + specifying where output predictions + should be written. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (:class:`MutableMapping[str, str]`): + Additional domain-specific parameters for the + predictions, any string must be up to 25000 characters + long. + + AutoML Natural Language Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for a text snippet, it + will only produce results that have at least this + confidence score. The default is 0.5. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. + + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects + on the image, it will only produce bounding boxes which + have at least this confidence score. Value in 0 to 1 + range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned per image. The default is + 100, the number of bounding boxes returned might be + limited by the server. AutoML Video Intelligence + Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for a video, it will + only produce results that have at least this confidence + score. The default is 0.5. + + ``segment_classification`` : (boolean) Set to true to + request segment-level classification. 
AutoML Video + Intelligence returns labels and their confidence scores + for the entire segment of the video that user specified + in the request configuration. The default is true. + + ``shot_classification`` : (boolean) Set to true to + request shot-level classification. AutoML Video + Intelligence determines the boundaries for each camera + shot in the entire segment of the video that user + specified in the request configuration. AutoML Video + Intelligence then returns labels and their confidence + scores for each detected shot, along with the start and + end time of the shot. The default is false. + + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. + + ``1s_interval_classification`` : (boolean) Set to true + to request classification for a video at one-second + intervals. AutoML Video Intelligence returns labels and + their confidence scores for each second of the entire + segment of the video that user specified in the request + configuration. The default is false. + + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. + + AutoML Video Intelligence Object Tracking + + ``score_threshold`` : (float) When Model detects objects + on video frames, it will only produce bounding boxes + which have at least this confidence score. Value in 0 to + 1 range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned per image. The default is + 100, the number of bounding boxes returned might be + limited by the server. + + ``min_bounding_box_size`` : (float) Only bounding boxes + with shortest edge at least that long as a relative + value of video frame size are returned. Value in 0 to 1 + range. Default is 0. + + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.automl_v1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of + the operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config, output_config, params]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.BatchPredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + if output_config is not None: + request.output_config = output_config + + if params: + request.params.update(params) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_predict, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + prediction_service.BatchPredictResult, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "PredictionServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "PredictionServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/client.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/client.py new file mode 100644 index 00000000..96c0c3bf --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/client.py @@ -0,0 +1,858 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
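For orientation, a minimal sketch of driving the asynchronous prediction client defined above from a script (illustrative only, not part of the generated diff). The model path is a placeholder, and the text payload assumes a text-capable model.

    import asyncio

    from google.cloud import automl_v1


    async def run_online_prediction(model_name: str) -> None:
        # PredictionServiceAsyncClient.predict returns the prediction inline,
        # unlike batch_predict, which returns a long-running operation.
        client = automl_v1.PredictionServiceAsyncClient()
        payload = automl_v1.ExamplePayload()
        payload.text_snippet.content = "example text"  # assumes a text model
        request = automl_v1.PredictRequest(name=model_name, payload=payload)
        response = await client.predict(request=request)
        print(response)


    if __name__ == "__main__":
        asyncio.run(run_online_prediction(
            "projects/PROJECT_ID/locations/us-central1/models/MODEL_ID"
        ))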
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast + +from google.cloud.automl_v1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.automl_v1.types import annotation_payload +from google.cloud.automl_v1.types import data_items +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import operations +from google.cloud.automl_v1.types import prediction_service +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PredictionServiceGrpcTransport +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .transports.rest import PredictionServiceRestTransport + + +class PredictionServiceClientMeta(type): + """Metaclass for the PredictionService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] + _transport_registry["grpc"] = PredictionServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport + _transport_registry["rest"] = PredictionServiceRestTransport + + def get_transport_class(cls, + label: Optional[str] = None, + ) -> Type[PredictionServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class PredictionServiceClient(metaclass=PredictionServiceClientMeta): + """AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or dash-case, either of those cases is accepted. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "automl.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PredictionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PredictionServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, PredictionServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PredictionServiceTransport): + # transport is a PredictionServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def predict(self, + request: Optional[Union[prediction_service.PredictRequest, dict]] = None, + *, + name: Optional[str] = None, + payload: Optional[data_items.ExamplePayload] = None, + params: Optional[MutableMapping[str, str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, + and their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. 
+ + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. + + AutoML Natural Language Sentiment Analysis + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Translation + + - A TextSnippet up to 25,000 characters, UTF-8 encoded. + + AutoML Tables + + - A row with column values matching the columns of the model, + up to 5MB. Not available for FORECASTING ``prediction_type``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_predict(): + # Create a client + client = automl_v1.PredictionServiceClient() + + # Initialize request argument(s) + payload = automl_v1.ExamplePayload() + payload.image.image_bytes = b'image_bytes_blob' + + request = automl_v1.PredictRequest( + name="name_value", + payload=payload, + ) + + # Make the request + response = client.predict(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.PredictRequest, dict]): + The request object. Request message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + name (str): + Required. Name of the model requested + to serve the prediction. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + payload (google.cloud.automl_v1.types.ExamplePayload): + Required. Payload to perform a + prediction on. The payload must match + the problem type that the model was + trained to solve. + + This corresponds to the ``payload`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (MutableMapping[str, str]): + Additional domain-specific parameters, any string must + be up to 25000 characters long. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. + + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects + on the image, it will only produce bounding boxes which + have at least this confidence score. Value in 0 to 1 + range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned. The default is 100. The + number of returned bounding boxes might be limited by + the server. + + AutoML Tables + + ``feature_importance`` : (boolean) Whether + [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance] + is populated in the returned list of + [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation] + objects. The default is false. + + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, payload, params]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.PredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.PredictRequest): + request = prediction_service.PredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if payload is not None: + request.payload = payload + if params is not None: + request.params = params + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def batch_predict(self, + request: Optional[Union[prediction_service.BatchPredictRequest, dict]] = None, + *, + name: Optional[str] = None, + input_config: Optional[io.BatchPredictInputConfig] = None, + output_config: Optional[io.BatchPredictOutputConfig] = None, + params: Optional[MutableMapping[str, str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. + Available for following ML scenarios: + + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural + Language Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1 + + def sample_batch_predict(): + # Create a client + client = automl_v1.PredictionServiceClient() + + # Initialize request argument(s) + input_config = automl_v1.BatchPredictInputConfig() + input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] + + output_config = automl_v1.BatchPredictOutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.BatchPredictRequest( + name="name_value", + input_config=input_config, + output_config=output_config, + ) + + # Make the request + operation = client.batch_predict(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1.types.BatchPredictRequest, dict]): + The request object. Request message for + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + name (str): + Required. Name of the model requested + to serve the batch prediction. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (google.cloud.automl_v1.types.BatchPredictInputConfig): + Required. The input configuration for + batch prediction. + + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.automl_v1.types.BatchPredictOutputConfig): + Required. The Configuration + specifying where output predictions + should be written. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (MutableMapping[str, str]): + Additional domain-specific parameters for the + predictions, any string must be up to 25000 characters + long. + + AutoML Natural Language Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for a text snippet, it + will only produce results that have at least this + confidence score. The default is 0.5. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. + + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects + on the image, it will only produce bounding boxes which + have at least this confidence score. Value in 0 to 1 + range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned per image. The default is + 100, the number of bounding boxes returned might be + limited by the server. AutoML Video Intelligence + Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. + When the model makes predictions for a video, it will + only produce results that have at least this confidence + score. The default is 0.5. + + ``segment_classification`` : (boolean) Set to true to + request segment-level classification. 
AutoML Video + Intelligence returns labels and their confidence scores + for the entire segment of the video that user specified + in the request configuration. The default is true. + + ``shot_classification`` : (boolean) Set to true to + request shot-level classification. AutoML Video + Intelligence determines the boundaries for each camera + shot in the entire segment of the video that user + specified in the request configuration. AutoML Video + Intelligence then returns labels and their confidence + scores for each detected shot, along with the start and + end time of the shot. The default is false. + + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. + + ``1s_interval_classification`` : (boolean) Set to true + to request classification for a video at one-second + intervals. AutoML Video Intelligence returns labels and + their confidence scores for each second of the entire + segment of the video that user specified in the request + configuration. The default is false. + + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. + + AutoML Video Intelligence Object Tracking + + ``score_threshold`` : (float) When Model detects objects + on video frames, it will only produce bounding boxes + which have at least this confidence score. Value in 0 to + 1 range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number + of bounding boxes returned per image. The default is + 100, the number of bounding boxes returned might be + limited by the server. + + ``min_bounding_box_size`` : (float) Only bounding boxes + with shortest edge at least that long as a relative + value of video frame size are returned. Value in 0 to 1 + range. Default is 0. + + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.automl_v1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of + the operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config, output_config, params]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.BatchPredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, prediction_service.BatchPredictRequest): + request = prediction_service.BatchPredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + if output_config is not None: + request.output_config = output_config + if params is not None: + request.params = params + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + prediction_service.BatchPredictResult, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "PredictionServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + + + + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "PredictionServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/__init__.py new file mode 100644 index 00000000..d8c81688 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PredictionServiceTransport +from .grpc import PredictionServiceGrpcTransport +from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .rest import PredictionServiceRestTransport +from .rest import PredictionServiceRestInterceptor + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] +_transport_registry['grpc'] = PredictionServiceGrpcTransport +_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport +_transport_registry['rest'] = PredictionServiceRestTransport + +__all__ = ( + 'PredictionServiceTransport', + 'PredictionServiceGrpcTransport', + 'PredictionServiceGrpcAsyncIOTransport', + 'PredictionServiceRestTransport', + 'PredictionServiceRestInterceptor', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/base.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/base.py new file mode 100644 index 00000000..609956a0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/base.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.automl_v1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.automl_v1.types import prediction_service +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +class PredictionServiceTransport(abc.ABC): + """Abstract transport class for PredictionService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'automl.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience(api_audience if api_audience else host) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.predict: gapic_v1.method.wrap_method( + self.predict, + default_timeout=60.0, + client_info=client_info, + ), + self.batch_predict: gapic_v1.method.wrap_method( + self.batch_predict, + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
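+
+        As an illustrative sketch (not generated code, and assuming
+        application default credentials), using the client as a context
+        manager closes its transport exactly once:
+
+        .. code-block:: python
+
+            from google.cloud import automl_v1
+
+            # The transport is closed when the with-block exits, so it
+            # must not be shared with another client instance.
+            with automl_v1.PredictionServiceClient() as client:
+                ...  # issue predict() / batch_predict() calls here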
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + Union[ + prediction_service.PredictResponse, + Awaitable[prediction_service.PredictResponse] + ]]: + raise NotImplementedError() + + @property + def batch_predict(self) -> Callable[ + [prediction_service.BatchPredictRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'PredictionServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc.py new file mode 100644 index 00000000..982858db --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc.py @@ -0,0 +1,367 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.automl_v1.types import prediction_service +from google.longrunning import operations_pb2 # type: ignore +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO + + +class PredictionServiceGrpcTransport(PredictionServiceTransport): + """gRPC backend transport for PredictionService. + + AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or dash-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
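+
+    A minimal usage sketch (illustrative only; it assumes application
+    default credentials are available):
+
+    .. code-block:: python
+
+        from google.cloud import automl_v1
+        from google.cloud.automl_v1.services.prediction_service.transports import (
+            PredictionServiceGrpcTransport,
+        )
+
+        # Hand a pre-built gRPC transport to the client; the client then
+        # reuses this transport's channel and credentials as-is.
+        transport = PredictionServiceGrpcTransport()
+        client = automl_v1.PredictionServiceClient(transport=transport)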
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
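+
+        For illustration only, a sketch of a mutual TLS configuration via
+        ``client_cert_source_for_mtls`` (the hostname and the PEM file
+        paths below are assumptions, not values taken from this library):
+
+        .. code-block:: python
+
+            from pathlib import Path
+
+            from google.cloud.automl_v1.services.prediction_service.transports import (
+                PredictionServiceGrpcTransport,
+            )
+
+            def cert_source():
+                # Hypothetical PEM files; return (certificate bytes, key bytes).
+                return Path("client.crt").read_bytes(), Path("client.key").read_bytes()
+
+            # The callback is only consulted when neither ``channel`` nor
+            # ``ssl_channel_credentials`` is supplied.
+            transport = PredictionServiceGrpcTransport(
+                host="automl.mtls.googleapis.com",
+                client_cert_source_for_mtls=cert_source,
+            )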
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. 
+ + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + prediction_service.PredictResponse]: + r"""Return a callable for the predict method over gRPC. + + Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, + and their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. + + AutoML Natural Language Sentiment Analysis + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Translation + + - A TextSnippet up to 25,000 characters, UTF-8 encoded. + + AutoML Tables + + - A row with column values matching the columns of the model, + up to 5MB. Not available for FORECASTING ``prediction_type``. + + Returns: + Callable[[~.PredictRequest], + ~.PredictResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'predict' not in self._stubs: + self._stubs['predict'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.PredictionService/Predict', + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs['predict'] + + @property + def batch_predict(self) -> Callable[ + [prediction_service.BatchPredictRequest], + operations_pb2.Operation]: + r"""Return a callable for the batch predict method over gRPC. + + Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. 
+ User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. + Available for following ML scenarios: + + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural + Language Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables + + Returns: + Callable[[~.BatchPredictRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_predict' not in self._stubs: + self._stubs['batch_predict'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.PredictionService/BatchPredict', + request_serializer=prediction_service.BatchPredictRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_predict'] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'PredictionServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..2bb725ec --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py @@ -0,0 +1,366 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.automl_v1.types import prediction_service +from google.longrunning import operations_pb2 # type: ignore +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import PredictionServiceGrpcTransport + + +class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): + """gRPC AsyncIO backend transport for PredictionService. + + AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or dash-case, either of those cases is accepted. 
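+
+    An illustrative sketch (assuming the async client exported by this
+    package and application default credentials):
+
+    .. code-block:: python
+
+        import asyncio
+
+        from google.cloud import automl_v1
+
+        async def main():
+            # "grpc_asyncio" names this transport; it is also the async
+            # client's default transport.
+            client = automl_v1.PredictionServiceAsyncClient(transport="grpc_asyncio")
+            # await client.predict(request=...) and other calls go here.
+
+        asyncio.run(main())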
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. 
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + Awaitable[prediction_service.PredictResponse]]: + r"""Return a callable for the predict method over gRPC. + + Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, + and their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to + 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. + + AutoML Natural Language Sentiment Analysis + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a + document in .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Translation + + - A TextSnippet up to 25,000 characters, UTF-8 encoded. + + AutoML Tables + + - A row with column values matching the columns of the model, + up to 5MB. Not available for FORECASTING ``prediction_type``. + + Returns: + Callable[[~.PredictRequest], + Awaitable[~.PredictResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'predict' not in self._stubs: + self._stubs['predict'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.PredictionService/Predict', + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs['predict'] + + @property + def batch_predict(self) -> Callable[ + [prediction_service.BatchPredictRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch predict method over gRPC. + + Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. 
Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. + Available for following ML scenarios: + + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural + Language Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables + + Returns: + Callable[[~.BatchPredictRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_predict' not in self._stubs: + self._stubs['batch_predict'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1.PredictionService/BatchPredict', + request_serializer=prediction_service.BatchPredictRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_predict'] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ( + 'PredictionServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/rest.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/rest.py new file mode 100644 index 00000000..86096e21 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/rest.py @@ -0,0 +1,484 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.cloud.automl_v1.types import prediction_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class PredictionServiceRestInterceptor: + """Interceptor for PredictionService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the PredictionServiceRestTransport. + + .. code-block:: python + class MyCustomPredictionServiceInterceptor(PredictionServiceRestInterceptor): + def pre_batch_predict(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_batch_predict(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_predict(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_predict(self, response): + logging.log(f"Received response: {response}") + return response + + transport = PredictionServiceRestTransport(interceptor=MyCustomPredictionServiceInterceptor()) + client = PredictionServiceClient(transport=transport) + + + """ + def pre_batch_predict(self, request: prediction_service.BatchPredictRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[prediction_service.BatchPredictRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for batch_predict + + Override in a subclass to manipulate the request or metadata + before they are sent to the PredictionService server. + """ + return request, metadata + + def post_batch_predict(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for batch_predict + + Override in a subclass to manipulate the response + after it is returned by the PredictionService server but before + it is returned to user code. 
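+
+        A minimal override sketch (illustrative; the subclass name is
+        hypothetical and the standard ``logging`` module is assumed):
+
+        .. code-block:: python
+
+            import logging
+
+            class LoggingPredictionServiceInterceptor(PredictionServiceRestInterceptor):
+                def post_batch_predict(self, response):
+                    # ``response`` is a long-running Operation; log its name
+                    # and hand it back to the caller unchanged.
+                    logging.info("BatchPredict operation: %s", response.name)
+                    return response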
+ """ + return response + def pre_predict(self, request: prediction_service.PredictRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[prediction_service.PredictRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for predict + + Override in a subclass to manipulate the request or metadata + before they are sent to the PredictionService server. + """ + return request, metadata + + def post_predict(self, response: prediction_service.PredictResponse) -> prediction_service.PredictResponse: + """Post-rpc interceptor for predict + + Override in a subclass to manipulate the response + after it is returned by the PredictionService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class PredictionServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: PredictionServiceRestInterceptor + + +class PredictionServiceRestTransport(PredictionServiceTransport): + """REST backend transport for PredictionService. + + AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or dash-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__(self, *, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[ + ], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = 'https', + interceptor: Optional[PredictionServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+ # credentials object
+ maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+ if maybe_url_match is None:
+ raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER
+
+ url_match_items = maybe_url_match.groupdict()
+
+ host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ api_audience=api_audience
+ )
+ self._session = AuthorizedSession(
+ self._credentials, default_host=self.DEFAULT_HOST)
+ self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+ if client_cert_source_for_mtls:
+ self._session.configure_mtls_channel(client_cert_source_for_mtls)
+ self._interceptor = interceptor or PredictionServiceRestInterceptor()
+ self._prep_wrapped_messages(client_info)
+
+ @property
+ def operations_client(self) -> operations_v1.AbstractOperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ http_options: Dict[str, List[Dict[str, str]]] = {
+ 'google.longrunning.Operations.CancelOperation': [
+ {
+ 'method': 'post',
+ 'uri': '/v1/{name=projects/*/locations/*/operations/*}:cancel',
+ 'body': '*',
+ },
+ ],
+ 'google.longrunning.Operations.DeleteOperation': [
+ {
+ 'method': 'delete',
+ 'uri': '/v1/{name=projects/*/locations/*/operations/*}',
+ },
+ ],
+ 'google.longrunning.Operations.GetOperation': [
+ {
+ 'method': 'get',
+ 'uri': '/v1/{name=projects/*/locations/*/operations/*}',
+ },
+ ],
+ 'google.longrunning.Operations.ListOperations': [
+ {
+ 'method': 'get',
+ 'uri': '/v1/{name=projects/*/locations/*}/operations',
+ },
+ ],
+ 'google.longrunning.Operations.WaitOperation': [
+ {
+ 'method': 'post',
+ 'uri': '/v1/{name=projects/*/locations/*/operations/*}:wait',
+ 'body': '*',
+ },
+ ],
+ }
+
+ rest_transport = operations_v1.OperationsRestTransport(
+ host=self._host,
+ # use the credentials which are saved
+ credentials=self._credentials,
+ scopes=self._scopes,
+ http_options=http_options,
+ path_prefix="v1")
+
+ self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport)
+
+ # Return the client from cache.
+ return self._operations_client
+
+ class _BatchPredict(PredictionServiceRestStub):
+ def __hash__(self):
+ return hash("BatchPredict")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict}
+
+ def __call__(self,
+ request: prediction_service.BatchPredictRequest, *,
+ retry: OptionalRetry=gapic_v1.method.DEFAULT,
+ timeout: Optional[float]=None,
+ metadata: Sequence[Tuple[str, str]]=(),
+ ) -> operations_pb2.Operation:
+ r"""Call the batch predict method over HTTP.
+
+ Args:
+ request (~.prediction_service.BatchPredictRequest):
+ The request object. Request message for
+ [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/models/*}:batchPredict', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_batch_predict(request, metadata) + pb_request = prediction_service.BatchPredictRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_batch_predict(resp) + return resp + + class _Predict(PredictionServiceRestStub): + def __hash__(self): + return hash("Predict") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: prediction_service.PredictRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> prediction_service.PredictResponse: + r"""Call the predict method over HTTP. + + Args: + request (~.prediction_service.PredictRequest): + The request object. Request message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.prediction_service.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/models/*}:predict', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_predict(request, metadata) + pb_request = prediction_service.PredictRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = prediction_service.PredictResponse() + pb_resp = prediction_service.PredictResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_predict(resp) + return resp + + @property + def batch_predict(self) -> Callable[ + [prediction_service.BatchPredictRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._BatchPredict(self._session, self._host, self._interceptor) # type: ignore + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + prediction_service.PredictResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._Predict(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__=( + 'PredictionServiceRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/__init__.py new file mode 100644 index 00000000..a94ad111 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/__init__.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
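The ``_BatchPredict`` and ``_Predict`` stubs above share one request path: the proto request is transcoded against the declared ``http_options``, the body and query parameters are serialized with ``json_format``, the call goes out through the ``AuthorizedSession``, and the JSON response is parsed back into a proto and passed through the interceptor. From the caller's side that machinery stays behind the client; the minimal sketch below is illustrative, the project, location, and model IDs are placeholders, and working credentials plus a deployed model are assumed.

.. code-block:: python

    from google.cloud import automl_v1

    # Placeholder resource name; substitute a real deployed model.
    model_name = "projects/my-project/locations/us-central1/models/my-model-id"

    # transport="rest" selects the PredictionServiceRestTransport shown above.
    client = automl_v1.PredictionServiceClient(transport="rest")

    payload = automl_v1.ExamplePayload(
        text_snippet=automl_v1.TextSnippet(content="Hello world", mime_type="text/plain"),
    )
    response = client.predict(name=model_name, payload=payload)

    for annotation in response.payload:
        # Each element is an AnnotationPayload, defined later in this patch.
        print(annotation.annotation_spec_id, annotation.display_name)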
+# +from .annotation_payload import ( + AnnotationPayload, +) +from .annotation_spec import ( + AnnotationSpec, +) +from .classification import ( + ClassificationAnnotation, + ClassificationEvaluationMetrics, + ClassificationType, +) +from .data_items import ( + Document, + DocumentDimensions, + ExamplePayload, + Image, + TextSnippet, +) +from .dataset import ( + Dataset, +) +from .detection import ( + BoundingBoxMetricsEntry, + ImageObjectDetectionAnnotation, + ImageObjectDetectionEvaluationMetrics, +) +from .geometry import ( + BoundingPoly, + NormalizedVertex, +) +from .image import ( + ImageClassificationDatasetMetadata, + ImageClassificationModelDeploymentMetadata, + ImageClassificationModelMetadata, + ImageObjectDetectionDatasetMetadata, + ImageObjectDetectionModelDeploymentMetadata, + ImageObjectDetectionModelMetadata, +) +from .io import ( + BatchPredictInputConfig, + BatchPredictOutputConfig, + DocumentInputConfig, + GcsDestination, + GcsSource, + InputConfig, + ModelExportOutputConfig, + OutputConfig, +) +from .model import ( + Model, +) +from .model_evaluation import ( + ModelEvaluation, +) +from .operations import ( + BatchPredictOperationMetadata, + CreateDatasetOperationMetadata, + CreateModelOperationMetadata, + DeleteOperationMetadata, + DeployModelOperationMetadata, + ExportDataOperationMetadata, + ExportModelOperationMetadata, + ImportDataOperationMetadata, + OperationMetadata, + UndeployModelOperationMetadata, +) +from .prediction_service import ( + BatchPredictRequest, + BatchPredictResult, + PredictRequest, + PredictResponse, +) +from .service import ( + CreateDatasetRequest, + CreateModelRequest, + DeleteDatasetRequest, + DeleteModelRequest, + DeployModelRequest, + ExportDataRequest, + ExportModelRequest, + GetAnnotationSpecRequest, + GetDatasetRequest, + GetModelEvaluationRequest, + GetModelRequest, + ImportDataRequest, + ListDatasetsRequest, + ListDatasetsResponse, + ListModelEvaluationsRequest, + ListModelEvaluationsResponse, + ListModelsRequest, + ListModelsResponse, + UndeployModelRequest, + UpdateDatasetRequest, + UpdateModelRequest, +) +from .text import ( + TextClassificationDatasetMetadata, + TextClassificationModelMetadata, + TextExtractionDatasetMetadata, + TextExtractionModelMetadata, + TextSentimentDatasetMetadata, + TextSentimentModelMetadata, +) +from .text_extraction import ( + TextExtractionAnnotation, + TextExtractionEvaluationMetrics, +) +from .text_segment import ( + TextSegment, +) +from .text_sentiment import ( + TextSentimentAnnotation, + TextSentimentEvaluationMetrics, +) +from .translation import ( + TranslationAnnotation, + TranslationDatasetMetadata, + TranslationEvaluationMetrics, + TranslationModelMetadata, +) + +__all__ = ( + 'AnnotationPayload', + 'AnnotationSpec', + 'ClassificationAnnotation', + 'ClassificationEvaluationMetrics', + 'ClassificationType', + 'Document', + 'DocumentDimensions', + 'ExamplePayload', + 'Image', + 'TextSnippet', + 'Dataset', + 'BoundingBoxMetricsEntry', + 'ImageObjectDetectionAnnotation', + 'ImageObjectDetectionEvaluationMetrics', + 'BoundingPoly', + 'NormalizedVertex', + 'ImageClassificationDatasetMetadata', + 'ImageClassificationModelDeploymentMetadata', + 'ImageClassificationModelMetadata', + 'ImageObjectDetectionDatasetMetadata', + 'ImageObjectDetectionModelDeploymentMetadata', + 'ImageObjectDetectionModelMetadata', + 'BatchPredictInputConfig', + 'BatchPredictOutputConfig', + 'DocumentInputConfig', + 'GcsDestination', + 'GcsSource', + 'InputConfig', + 'ModelExportOutputConfig', + 'OutputConfig', + 'Model', 
+ 'ModelEvaluation', + 'BatchPredictOperationMetadata', + 'CreateDatasetOperationMetadata', + 'CreateModelOperationMetadata', + 'DeleteOperationMetadata', + 'DeployModelOperationMetadata', + 'ExportDataOperationMetadata', + 'ExportModelOperationMetadata', + 'ImportDataOperationMetadata', + 'OperationMetadata', + 'UndeployModelOperationMetadata', + 'BatchPredictRequest', + 'BatchPredictResult', + 'PredictRequest', + 'PredictResponse', + 'CreateDatasetRequest', + 'CreateModelRequest', + 'DeleteDatasetRequest', + 'DeleteModelRequest', + 'DeployModelRequest', + 'ExportDataRequest', + 'ExportModelRequest', + 'GetAnnotationSpecRequest', + 'GetDatasetRequest', + 'GetModelEvaluationRequest', + 'GetModelRequest', + 'ImportDataRequest', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'UndeployModelRequest', + 'UpdateDatasetRequest', + 'UpdateModelRequest', + 'TextClassificationDatasetMetadata', + 'TextClassificationModelMetadata', + 'TextExtractionDatasetMetadata', + 'TextExtractionModelMetadata', + 'TextSentimentDatasetMetadata', + 'TextSentimentModelMetadata', + 'TextExtractionAnnotation', + 'TextExtractionEvaluationMetrics', + 'TextSegment', + 'TextSentimentAnnotation', + 'TextSentimentEvaluationMetrics', + 'TranslationAnnotation', + 'TranslationDatasetMetadata', + 'TranslationEvaluationMetrics', + 'TranslationModelMetadata', +) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_payload.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_payload.py new file mode 100644 index 00000000..0164e1be --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_payload.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import classification as gca_classification +from google.cloud.automl_v1.types import detection +from google.cloud.automl_v1.types import text_extraction as gca_text_extraction +from google.cloud.automl_v1.types import text_sentiment as gca_text_sentiment +from google.cloud.automl_v1.types import translation as gca_translation + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'AnnotationPayload', + }, +) + + +class AnnotationPayload(proto.Message): + r"""Contains annotation information that is relevant to AutoML. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + translation (google.cloud.automl_v1.types.TranslationAnnotation): + Annotation details for translation. 
+ + This field is a member of `oneof`_ ``detail``. + classification (google.cloud.automl_v1.types.ClassificationAnnotation): + Annotation details for content or image + classification. + + This field is a member of `oneof`_ ``detail``. + image_object_detection (google.cloud.automl_v1.types.ImageObjectDetectionAnnotation): + Annotation details for image object + detection. + + This field is a member of `oneof`_ ``detail``. + text_extraction (google.cloud.automl_v1.types.TextExtractionAnnotation): + Annotation details for text extraction. + + This field is a member of `oneof`_ ``detail``. + text_sentiment (google.cloud.automl_v1.types.TextSentimentAnnotation): + Annotation details for text sentiment. + + This field is a member of `oneof`_ ``detail``. + annotation_spec_id (str): + Output only . The resource ID of the + annotation spec that this annotation pertains + to. The annotation spec comes from either an + ancestor dataset, or the dataset that was used + to train the model in use. + display_name (str): + Output only. The value of + [display_name][google.cloud.automl.v1.AnnotationSpec.display_name] + when the model was trained. Because this field returns a + value at model training time, for different models trained + using the same dataset, the returned value could be + different as model owner could update the ``display_name`` + between any two model training. + """ + + translation: gca_translation.TranslationAnnotation = proto.Field( + proto.MESSAGE, + number=2, + oneof='detail', + message=gca_translation.TranslationAnnotation, + ) + classification: gca_classification.ClassificationAnnotation = proto.Field( + proto.MESSAGE, + number=3, + oneof='detail', + message=gca_classification.ClassificationAnnotation, + ) + image_object_detection: detection.ImageObjectDetectionAnnotation = proto.Field( + proto.MESSAGE, + number=4, + oneof='detail', + message=detection.ImageObjectDetectionAnnotation, + ) + text_extraction: gca_text_extraction.TextExtractionAnnotation = proto.Field( + proto.MESSAGE, + number=6, + oneof='detail', + message=gca_text_extraction.TextExtractionAnnotation, + ) + text_sentiment: gca_text_sentiment.TextSentimentAnnotation = proto.Field( + proto.MESSAGE, + number=7, + oneof='detail', + message=gca_text_sentiment.TextSentimentAnnotation, + ) + annotation_spec_id: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_spec.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_spec.py new file mode 100644 index 00000000..81961c68 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_spec.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
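``AnnotationPayload`` above keeps its per-problem details in the ``detail`` oneof, so setting one member clears the others. A small illustrative sketch of that behaviour, reaching the raw protobuf message through the same public ``pb()`` helper the REST transport code uses:

.. code-block:: python

    from google.cloud import automl_v1

    payload = automl_v1.AnnotationPayload(
        classification=automl_v1.ClassificationAnnotation(score=0.9),
    )
    assert automl_v1.AnnotationPayload.pb(payload).WhichOneof("detail") == "classification"

    # Assigning another member of the ``detail`` oneof clears ``classification``.
    payload.translation = automl_v1.TranslationAnnotation()
    assert automl_v1.AnnotationPayload.pb(payload).WhichOneof("detail") == "translation"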
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'AnnotationSpec', + }, +) + + +class AnnotationSpec(proto.Message): + r"""A definition of an annotation spec. + + Attributes: + name (str): + Output only. Resource name of the annotation spec. Form: + 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}' + display_name (str): + Required. The name of the annotation spec to show in the + interface. The name can be up to 32 characters long and must + match the regexp ``[a-zA-Z0-9_]+``. + example_count (int): + Output only. The number of examples in the + parent dataset labeled by the annotation spec. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + example_count: int = proto.Field( + proto.INT32, + number=9, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/classification.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/classification.py new file mode 100644 index 00000000..1885dd52 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/classification.py @@ -0,0 +1,310 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'ClassificationType', + 'ClassificationAnnotation', + 'ClassificationEvaluationMetrics', + }, +) + + +class ClassificationType(proto.Enum): + r"""Type of the classification problem. + + Values: + CLASSIFICATION_TYPE_UNSPECIFIED (0): + An un-set value of this enum. + MULTICLASS (1): + At most one label is allowed per example. + MULTILABEL (2): + Multiple labels are allowed for one example. + """ + CLASSIFICATION_TYPE_UNSPECIFIED = 0 + MULTICLASS = 1 + MULTILABEL = 2 + + +class ClassificationAnnotation(proto.Message): + r"""Contains annotation details specific to classification. + + Attributes: + score (float): + Output only. A confidence estimate between + 0.0 and 1.0. A higher value means greater + confidence that the annotation is positive. If a + user approves an annotation as negative or + positive, the score value remains unchanged. If + a user creates an annotation, the score is 0 for + negative or 1 for positive. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + ) + + +class ClassificationEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for classification problems. Note: For + Video Classification this metrics only describe quality of the Video + Classification predictions of "segment_classification" type. + + Attributes: + au_prc (float): + Output only. The Area Under Precision-Recall + Curve metric. 
Micro-averaged for the overall + evaluation. + au_roc (float): + Output only. The Area Under Receiver + Operating Characteristic curve metric. + Micro-averaged for the overall evaluation. + log_loss (float): + Output only. The Log Loss metric. + confidence_metrics_entry (MutableSequence[google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]): + Output only. Metrics for each confidence_threshold in + 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and + position_threshold = INT32_MAX_VALUE. ROC and + precision-recall curves, and other aggregated metrics are + derived from them. The confidence metrics entries may also + be supplied for additional values of position_threshold, but + from these no aggregated metrics are computed. + confusion_matrix (google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfusionMatrix): + Output only. Confusion matrix of the + evaluation. Only set for MULTICLASS + classification problems where number of labels + is no more than 10. + Only set for model level evaluation, not for + evaluation per label. + annotation_spec_id (MutableSequence[str]): + Output only. The annotation spec ids used for + this evaluation. + """ + + class ConfidenceMetricsEntry(proto.Message): + r"""Metrics for a single confidence threshold. + + Attributes: + confidence_threshold (float): + Output only. Metrics are computed with an + assumption that the model never returns + predictions with score lower than this value. + position_threshold (int): + Output only. Metrics are computed with an assumption that + the model always returns at most this many predictions + (ordered by their score, descendingly), but they all still + need to meet the confidence_threshold. + recall (float): + Output only. Recall (True Positive Rate) for + the given confidence threshold. + precision (float): + Output only. Precision for the given + confidence threshold. + false_positive_rate (float): + Output only. False Positive Rate for the + given confidence threshold. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + recall_at1 (float): + Output only. The Recall (True Positive Rate) + when only considering the label that has the + highest prediction score and not below the + confidence threshold for each example. + precision_at1 (float): + Output only. The precision when only + considering the label that has the highest + prediction score and not below the confidence + threshold for each example. + false_positive_rate_at1 (float): + Output only. The False Positive Rate when + only considering the label that has the highest + prediction score and not below the confidence + threshold for each example. + f1_score_at1 (float): + Output only. The harmonic mean of + [recall_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] + and + [precision_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1]. + true_positive_count (int): + Output only. The number of model created + labels that match a ground truth label. + false_positive_count (int): + Output only. The number of model created + labels that do not match a ground truth label. + false_negative_count (int): + Output only. The number of ground truth + labels that are not matched by a model created + label. + true_negative_count (int): + Output only. The number of labels that were + not created by the model, but if they would, + they would not match a ground truth label. 
+ """ + + confidence_threshold: float = proto.Field( + proto.FLOAT, + number=1, + ) + position_threshold: int = proto.Field( + proto.INT32, + number=14, + ) + recall: float = proto.Field( + proto.FLOAT, + number=2, + ) + precision: float = proto.Field( + proto.FLOAT, + number=3, + ) + false_positive_rate: float = proto.Field( + proto.FLOAT, + number=8, + ) + f1_score: float = proto.Field( + proto.FLOAT, + number=4, + ) + recall_at1: float = proto.Field( + proto.FLOAT, + number=5, + ) + precision_at1: float = proto.Field( + proto.FLOAT, + number=6, + ) + false_positive_rate_at1: float = proto.Field( + proto.FLOAT, + number=9, + ) + f1_score_at1: float = proto.Field( + proto.FLOAT, + number=7, + ) + true_positive_count: int = proto.Field( + proto.INT64, + number=10, + ) + false_positive_count: int = proto.Field( + proto.INT64, + number=11, + ) + false_negative_count: int = proto.Field( + proto.INT64, + number=12, + ) + true_negative_count: int = proto.Field( + proto.INT64, + number=13, + ) + + class ConfusionMatrix(proto.Message): + r"""Confusion matrix of the model running the classification. + + Attributes: + annotation_spec_id (MutableSequence[str]): + Output only. IDs of the annotation specs used in the + confusion matrix. For Tables CLASSIFICATION + [prediction_type][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type] + only list of [annotation_spec_display_name-s][] is + populated. + display_name (MutableSequence[str]): + Output only. Display name of the annotation specs used in + the confusion matrix, as they were at the moment of the + evaluation. For Tables CLASSIFICATION + [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type], + distinct values of the target column at the moment of the + model evaluation are populated here. + row (MutableSequence[google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfusionMatrix.Row]): + Output only. Rows in the confusion matrix. The number of + rows is equal to the size of ``annotation_spec_id``. + ``row[i].example_count[j]`` is the number of examples that + have ground truth of the ``annotation_spec_id[i]`` and are + predicted as ``annotation_spec_id[j]`` by the model being + evaluated. + """ + + class Row(proto.Message): + r"""Output only. A row in the confusion matrix. + + Attributes: + example_count (MutableSequence[int]): + Output only. Value of the specific cell in the confusion + matrix. The number of values each row has (i.e. the length + of the row) is equal to the length of the + ``annotation_spec_id`` field or, if that one is not + populated, length of the + [display_name][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] + field. 
+ """ + + example_count: MutableSequence[int] = proto.RepeatedField( + proto.INT32, + number=1, + ) + + annotation_spec_id: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + display_name: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + row: MutableSequence['ClassificationEvaluationMetrics.ConfusionMatrix.Row'] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='ClassificationEvaluationMetrics.ConfusionMatrix.Row', + ) + + au_prc: float = proto.Field( + proto.FLOAT, + number=1, + ) + au_roc: float = proto.Field( + proto.FLOAT, + number=6, + ) + log_loss: float = proto.Field( + proto.FLOAT, + number=7, + ) + confidence_metrics_entry: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=ConfidenceMetricsEntry, + ) + confusion_matrix: ConfusionMatrix = proto.Field( + proto.MESSAGE, + number=4, + message=ConfusionMatrix, + ) + annotation_spec_id: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/data_items.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/data_items.py new file mode 100644 index 00000000..0d71bb07 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/data_items.py @@ -0,0 +1,337 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import geometry +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import text_segment as gca_text_segment + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'Image', + 'TextSnippet', + 'DocumentDimensions', + 'Document', + 'ExamplePayload', + }, +) + + +class Image(proto.Message): + r"""A representation of an image. + Only images up to 30MB in size are supported. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + image_bytes (bytes): + Image content represented as a stream of bytes. Note: As + with all ``bytes`` fields, protobuffers use a pure binary + representation, whereas JSON representations use base64. + + This field is a member of `oneof`_ ``data``. + thumbnail_uri (str): + Output only. HTTP URI to the thumbnail image. + """ + + image_bytes: bytes = proto.Field( + proto.BYTES, + number=1, + oneof='data', + ) + thumbnail_uri: str = proto.Field( + proto.STRING, + number=4, + ) + + +class TextSnippet(proto.Message): + r"""A representation of a text snippet. + + Attributes: + content (str): + Required. The content of the text snippet as + a string. Up to 250000 characters long. + mime_type (str): + Optional. The format of + [content][google.cloud.automl.v1.TextSnippet.content]. 
+ Currently the only two allowed values are "text/html" and + "text/plain". If left blank, the format is automatically + determined from the type of the uploaded + [content][google.cloud.automl.v1.TextSnippet.content]. + content_uri (str): + Output only. HTTP URI where you can download + the content. + """ + + content: str = proto.Field( + proto.STRING, + number=1, + ) + mime_type: str = proto.Field( + proto.STRING, + number=2, + ) + content_uri: str = proto.Field( + proto.STRING, + number=4, + ) + + +class DocumentDimensions(proto.Message): + r"""Message that describes dimension of a document. + + Attributes: + unit (google.cloud.automl_v1.types.DocumentDimensions.DocumentDimensionUnit): + Unit of the dimension. + width (float): + Width value of the document, works together + with the unit. + height (float): + Height value of the document, works together + with the unit. + """ + class DocumentDimensionUnit(proto.Enum): + r"""Unit of the document dimension. + + Values: + DOCUMENT_DIMENSION_UNIT_UNSPECIFIED (0): + Should not be used. + INCH (1): + Document dimension is measured in inches. + CENTIMETER (2): + Document dimension is measured in + centimeters. + POINT (3): + Document dimension is measured in points. 72 + points = 1 inch. + """ + DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0 + INCH = 1 + CENTIMETER = 2 + POINT = 3 + + unit: DocumentDimensionUnit = proto.Field( + proto.ENUM, + number=1, + enum=DocumentDimensionUnit, + ) + width: float = proto.Field( + proto.FLOAT, + number=2, + ) + height: float = proto.Field( + proto.FLOAT, + number=3, + ) + + +class Document(proto.Message): + r"""A structured text document e.g. a PDF. + + Attributes: + input_config (google.cloud.automl_v1.types.DocumentInputConfig): + An input config specifying the content of the + document. + document_text (google.cloud.automl_v1.types.TextSnippet): + The plain text version of this document. + layout (MutableSequence[google.cloud.automl_v1.types.Document.Layout]): + Describes the layout of the document. Sorted by + [page_number][]. + document_dimensions (google.cloud.automl_v1.types.DocumentDimensions): + The dimensions of the page in the document. + page_count (int): + Number of pages in the document. + """ + + class Layout(proto.Message): + r"""Describes the layout information of a + [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] + in the document. + + Attributes: + text_segment (google.cloud.automl_v1.types.TextSegment): + Text Segment that represents a segment in + [document_text][google.cloud.automl.v1p1beta.Document.document_text]. + page_number (int): + Page number of the + [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] + in the original document, starts from 1. + bounding_poly (google.cloud.automl_v1.types.BoundingPoly): + The position of the + [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] + in the page. Contains exactly 4 + [normalized_vertices][google.cloud.automl.v1p1beta.BoundingPoly.normalized_vertices] + and they are connected by edges in the order provided, which + will represent a rectangle parallel to the frame. The + [NormalizedVertex-s][google.cloud.automl.v1p1beta.NormalizedVertex] + are relative to the page. Coordinates are based on top-left + as point (0,0). + text_segment_type (google.cloud.automl_v1.types.Document.Layout.TextSegmentType): + The type of the + [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] + in document. 
+ """ + class TextSegmentType(proto.Enum): + r"""The type of TextSegment in the context of the original + document. + + Values: + TEXT_SEGMENT_TYPE_UNSPECIFIED (0): + Should not be used. + TOKEN (1): + The text segment is a token. e.g. word. + PARAGRAPH (2): + The text segment is a paragraph. + FORM_FIELD (3): + The text segment is a form field. + FORM_FIELD_NAME (4): + The text segment is the name part of a form field. It will + be treated as child of another FORM_FIELD TextSegment if its + span is subspan of another TextSegment with type FORM_FIELD. + FORM_FIELD_CONTENTS (5): + The text segment is the text content part of a form field. + It will be treated as child of another FORM_FIELD + TextSegment if its span is subspan of another TextSegment + with type FORM_FIELD. + TABLE (6): + The text segment is a whole table, including + headers, and all rows. + TABLE_HEADER (7): + The text segment is a table's headers. It + will be treated as child of another TABLE + TextSegment if its span is subspan of another + TextSegment with type TABLE. + TABLE_ROW (8): + The text segment is a row in table. It will + be treated as child of another TABLE TextSegment + if its span is subspan of another TextSegment + with type TABLE. + TABLE_CELL (9): + The text segment is a cell in table. It will be treated as + child of another TABLE_ROW TextSegment if its span is + subspan of another TextSegment with type TABLE_ROW. + """ + TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 + TOKEN = 1 + PARAGRAPH = 2 + FORM_FIELD = 3 + FORM_FIELD_NAME = 4 + FORM_FIELD_CONTENTS = 5 + TABLE = 6 + TABLE_HEADER = 7 + TABLE_ROW = 8 + TABLE_CELL = 9 + + text_segment: gca_text_segment.TextSegment = proto.Field( + proto.MESSAGE, + number=1, + message=gca_text_segment.TextSegment, + ) + page_number: int = proto.Field( + proto.INT32, + number=2, + ) + bounding_poly: geometry.BoundingPoly = proto.Field( + proto.MESSAGE, + number=3, + message=geometry.BoundingPoly, + ) + text_segment_type: 'Document.Layout.TextSegmentType' = proto.Field( + proto.ENUM, + number=4, + enum='Document.Layout.TextSegmentType', + ) + + input_config: io.DocumentInputConfig = proto.Field( + proto.MESSAGE, + number=1, + message=io.DocumentInputConfig, + ) + document_text: 'TextSnippet' = proto.Field( + proto.MESSAGE, + number=2, + message='TextSnippet', + ) + layout: MutableSequence[Layout] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Layout, + ) + document_dimensions: 'DocumentDimensions' = proto.Field( + proto.MESSAGE, + number=4, + message='DocumentDimensions', + ) + page_count: int = proto.Field( + proto.INT32, + number=5, + ) + + +class ExamplePayload(proto.Message): + r"""Example data used for training or prediction. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + image (google.cloud.automl_v1.types.Image): + Example image. + + This field is a member of `oneof`_ ``payload``. + text_snippet (google.cloud.automl_v1.types.TextSnippet): + Example text. + + This field is a member of `oneof`_ ``payload``. + document (google.cloud.automl_v1.types.Document): + Example document. + + This field is a member of `oneof`_ ``payload``. 
+ """ + + image: 'Image' = proto.Field( + proto.MESSAGE, + number=1, + oneof='payload', + message='Image', + ) + text_snippet: 'TextSnippet' = proto.Field( + proto.MESSAGE, + number=2, + oneof='payload', + message='TextSnippet', + ) + document: 'Document' = proto.Field( + proto.MESSAGE, + number=4, + oneof='payload', + message='Document', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/dataset.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/dataset.py new file mode 100644 index 00000000..17dc3e7e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/dataset.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import image +from google.cloud.automl_v1.types import text +from google.cloud.automl_v1.types import translation +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'Dataset', + }, +) + + +class Dataset(proto.Message): + r"""A workspace for solving a single, particular machine learning + (ML) problem. A workspace contains examples that may be + annotated. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + translation_dataset_metadata (google.cloud.automl_v1.types.TranslationDatasetMetadata): + Metadata for a dataset used for translation. + + This field is a member of `oneof`_ ``dataset_metadata``. + image_classification_dataset_metadata (google.cloud.automl_v1.types.ImageClassificationDatasetMetadata): + Metadata for a dataset used for image + classification. + + This field is a member of `oneof`_ ``dataset_metadata``. + text_classification_dataset_metadata (google.cloud.automl_v1.types.TextClassificationDatasetMetadata): + Metadata for a dataset used for text + classification. + + This field is a member of `oneof`_ ``dataset_metadata``. + image_object_detection_dataset_metadata (google.cloud.automl_v1.types.ImageObjectDetectionDatasetMetadata): + Metadata for a dataset used for image object + detection. + + This field is a member of `oneof`_ ``dataset_metadata``. + text_extraction_dataset_metadata (google.cloud.automl_v1.types.TextExtractionDatasetMetadata): + Metadata for a dataset used for text + extraction. + + This field is a member of `oneof`_ ``dataset_metadata``. + text_sentiment_dataset_metadata (google.cloud.automl_v1.types.TextSentimentDatasetMetadata): + Metadata for a dataset used for text + sentiment. + + This field is a member of `oneof`_ ``dataset_metadata``. 
+ name (str): + Output only. The resource name of the dataset. Form: + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`` + display_name (str): + Required. The name of the dataset to show in the interface. + The name can be up to 32 characters long and can consist + only of ASCII Latin letters A-Z and a-z, underscores (_), + and ASCII digits 0-9. + description (str): + User-provided description of the dataset. The + description can be up to 25000 characters long. + example_count (int): + Output only. The number of examples in the + dataset. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this dataset was + created. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (MutableMapping[str, str]): + Optional. The labels with user-defined + metadata to organize your dataset. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. Label values are optional. Label + keys must start with a letter. + + See https://goo.gl/xmQnxf for more information + on and examples of labels. + """ + + translation_dataset_metadata: translation.TranslationDatasetMetadata = proto.Field( + proto.MESSAGE, + number=23, + oneof='dataset_metadata', + message=translation.TranslationDatasetMetadata, + ) + image_classification_dataset_metadata: image.ImageClassificationDatasetMetadata = proto.Field( + proto.MESSAGE, + number=24, + oneof='dataset_metadata', + message=image.ImageClassificationDatasetMetadata, + ) + text_classification_dataset_metadata: text.TextClassificationDatasetMetadata = proto.Field( + proto.MESSAGE, + number=25, + oneof='dataset_metadata', + message=text.TextClassificationDatasetMetadata, + ) + image_object_detection_dataset_metadata: image.ImageObjectDetectionDatasetMetadata = proto.Field( + proto.MESSAGE, + number=26, + oneof='dataset_metadata', + message=image.ImageObjectDetectionDatasetMetadata, + ) + text_extraction_dataset_metadata: text.TextExtractionDatasetMetadata = proto.Field( + proto.MESSAGE, + number=28, + oneof='dataset_metadata', + message=text.TextExtractionDatasetMetadata, + ) + text_sentiment_dataset_metadata: text.TextSentimentDatasetMetadata = proto.Field( + proto.MESSAGE, + number=30, + oneof='dataset_metadata', + message=text.TextSentimentDatasetMetadata, + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + description: str = proto.Field( + proto.STRING, + number=3, + ) + example_count: int = proto.Field( + proto.INT32, + number=21, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + etag: str = proto.Field( + proto.STRING, + number=17, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=39, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/detection.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/detection.py new file mode 100644 index 00000000..90b2028d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/detection.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import geometry + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'ImageObjectDetectionAnnotation', + 'BoundingBoxMetricsEntry', + 'ImageObjectDetectionEvaluationMetrics', + }, +) + + +class ImageObjectDetectionAnnotation(proto.Message): + r"""Annotation details for image object detection. + + Attributes: + bounding_box (google.cloud.automl_v1.types.BoundingPoly): + Output only. The rectangle representing the + object location. + score (float): + Output only. The confidence that this annotation is positive + for the parent example, value in [0, 1], higher means higher + positivity confidence. + """ + + bounding_box: geometry.BoundingPoly = proto.Field( + proto.MESSAGE, + number=1, + message=geometry.BoundingPoly, + ) + score: float = proto.Field( + proto.FLOAT, + number=2, + ) + + +class BoundingBoxMetricsEntry(proto.Message): + r"""Bounding box matching model metrics for a single + intersection-over-union threshold and multiple label match + confidence thresholds. + + Attributes: + iou_threshold (float): + Output only. The intersection-over-union + threshold value used to compute this metrics + entry. + mean_average_precision (float): + Output only. The mean average precision, most often close to + au_prc. + confidence_metrics_entries (MutableSequence[google.cloud.automl_v1.types.BoundingBoxMetricsEntry.ConfidenceMetricsEntry]): + Output only. Metrics for each label-match + confidence_threshold from + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall + curve is derived from them. + """ + + class ConfidenceMetricsEntry(proto.Message): + r"""Metrics for a single confidence threshold. + + Attributes: + confidence_threshold (float): + Output only. The confidence threshold value + used to compute the metrics. + recall (float): + Output only. Recall under the given + confidence threshold. + precision (float): + Output only. Precision under the given + confidence threshold. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + """ + + confidence_threshold: float = proto.Field( + proto.FLOAT, + number=1, + ) + recall: float = proto.Field( + proto.FLOAT, + number=2, + ) + precision: float = proto.Field( + proto.FLOAT, + number=3, + ) + f1_score: float = proto.Field( + proto.FLOAT, + number=4, + ) + + iou_threshold: float = proto.Field( + proto.FLOAT, + number=1, + ) + mean_average_precision: float = proto.Field( + proto.FLOAT, + number=2, + ) + confidence_metrics_entries: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=ConfidenceMetricsEntry, + ) + + +class ImageObjectDetectionEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for image object detection problems. + Evaluates prediction quality of labeled bounding boxes. + + Attributes: + evaluated_bounding_box_count (int): + Output only. The total number of bounding + boxes (i.e. 
summed over all images) the ground + truth used to create this evaluation had. + bounding_box_metrics_entries (MutableSequence[google.cloud.automl_v1.types.BoundingBoxMetricsEntry]): + Output only. The bounding boxes match metrics + for each Intersection-over-union threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each + label confidence threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. + bounding_box_mean_average_precision (float): + Output only. The single metric for bounding boxes + evaluation: the mean_average_precision averaged over all + bounding_box_metrics_entries. + """ + + evaluated_bounding_box_count: int = proto.Field( + proto.INT32, + number=1, + ) + bounding_box_metrics_entries: MutableSequence['BoundingBoxMetricsEntry'] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='BoundingBoxMetricsEntry', + ) + bounding_box_mean_average_precision: float = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/geometry.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/geometry.py new file mode 100644 index 00000000..f9c36025 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/geometry.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'NormalizedVertex', + 'BoundingPoly', + }, +) + + +class NormalizedVertex(proto.Message): + r"""A vertex represents a 2D point in the image. + The normalized vertex coordinates are between 0 to 1 fractions + relative to the original plane (image, video). E.g. if the plane + (e.g. whole image) would have size 10 x 20 then a point with + normalized coordinates (0.1, 0.3) would be at the position (1, + 6) on that plane. + + Attributes: + x (float): + Required. Horizontal coordinate. + y (float): + Required. Vertical coordinate. + """ + + x: float = proto.Field( + proto.FLOAT, + number=1, + ) + y: float = proto.Field( + proto.FLOAT, + number=2, + ) + + +class BoundingPoly(proto.Message): + r"""A bounding polygon of a detected object on a plane. On output both + vertices and normalized_vertices are provided. The polygon is formed + by connecting vertices in the order they are listed. + + Attributes: + normalized_vertices (MutableSequence[google.cloud.automl_v1.types.NormalizedVertex]): + Output only . The bounding polygon normalized + vertices. 
+ """ + + normalized_vertices: MutableSequence['NormalizedVertex'] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='NormalizedVertex', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/image.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/image.py new file mode 100644 index 00000000..522af62f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/image.py @@ -0,0 +1,318 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import classification + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'ImageClassificationDatasetMetadata', + 'ImageObjectDetectionDatasetMetadata', + 'ImageClassificationModelMetadata', + 'ImageObjectDetectionModelMetadata', + 'ImageClassificationModelDeploymentMetadata', + 'ImageObjectDetectionModelDeploymentMetadata', + }, +) + + +class ImageClassificationDatasetMetadata(proto.Message): + r"""Dataset metadata that is specific to image classification. + + Attributes: + classification_type (google.cloud.automl_v1.types.ClassificationType): + Required. Type of the classification problem. + """ + + classification_type: classification.ClassificationType = proto.Field( + proto.ENUM, + number=1, + enum=classification.ClassificationType, + ) + + +class ImageObjectDetectionDatasetMetadata(proto.Message): + r"""Dataset metadata specific to image object detection. + """ + + +class ImageClassificationModelMetadata(proto.Message): + r"""Model metadata for image classification. + + Attributes: + base_model_id (str): + Optional. The ID of the ``base`` model. If it is specified, + the new model will be created based on the ``base`` model. + Otherwise, the new model will be created from scratch. The + ``base`` model must be in the same ``project`` and + ``location`` as the new model to create, and have the same + ``model_type``. + train_budget_milli_node_hours (int): + Optional. The train budget of creating this model, expressed + in milli node hours i.e. 1,000 value in this field means 1 + node hour. The actual ``train_cost`` will be equal or less + than this value. If further model training ceases to provide + any improvements, it will stop without using full budget and + the stop_reason will be ``MODEL_CONVERGED``. Note, node_hour + = actual_hour \* number_of_nodes_invovled. For model type + ``cloud``\ (default), the train budget must be between 8,000 + and 800,000 milli node hours, inclusive. The default value + is 192, 000 which represents one day in wall time. For model + type ``mobile-low-latency-1``, ``mobile-versatile-1``, + ``mobile-high-accuracy-1``, + ``mobile-core-ml-low-latency-1``, + ``mobile-core-ml-versatile-1``, + ``mobile-core-ml-high-accuracy-1``, the train budget must be + between 1,000 and 100,000 milli node hours, inclusive. 
The + default value is 24, 000 which represents one day in wall + time. + train_cost_milli_node_hours (int): + Output only. The actual train cost of + creating this model, expressed in milli node + hours, i.e. 1,000 value in this field means 1 + node hour. Guaranteed to not exceed the train + budget. + stop_reason (str): + Output only. The reason that this create model operation + stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. + model_type (str): + Optional. Type of the model. The available values are: + + - ``cloud`` - Model to be used via prediction calls to + AutoML API. This is the default value. + - ``mobile-low-latency-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have low latency, but may have + lower prediction quality than other models. + - ``mobile-versatile-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. + - ``mobile-high-accuracy-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have a higher latency, but should + also have a higher prediction quality than other models. + - ``mobile-core-ml-low-latency-1`` - A model that, in + addition to providing prediction via AutoML API, can also + be exported (see + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) + and used on a mobile device with Core ML afterwards. + Expected to have low latency, but may have lower + prediction quality than other models. + - ``mobile-core-ml-versatile-1`` - A model that, in + addition to providing prediction via AutoML API, can also + be exported (see + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) + and used on a mobile device with Core ML afterwards. + - ``mobile-core-ml-high-accuracy-1`` - A model that, in + addition to providing prediction via AutoML API, can also + be exported (see + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) + and used on a mobile device with Core ML afterwards. + Expected to have a higher latency, but should also have a + higher prediction quality than other models. + node_qps (float): + Output only. An approximate number of online + prediction QPS that can be supported by this + model per each node on which it is deployed. + node_count (int): + Output only. The number of nodes this model is deployed on. + A node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the node_qps field. + """ + + base_model_id: str = proto.Field( + proto.STRING, + number=1, + ) + train_budget_milli_node_hours: int = proto.Field( + proto.INT64, + number=16, + ) + train_cost_milli_node_hours: int = proto.Field( + proto.INT64, + number=17, + ) + stop_reason: str = proto.Field( + proto.STRING, + number=5, + ) + model_type: str = proto.Field( + proto.STRING, + number=7, + ) + node_qps: float = proto.Field( + proto.DOUBLE, + number=13, + ) + node_count: int = proto.Field( + proto.INT64, + number=14, + ) + + +class ImageObjectDetectionModelMetadata(proto.Message): + r"""Model metadata specific to image object detection. 
+ + Attributes: + model_type (str): + Optional. Type of the model. The available values are: + + - ``cloud-high-accuracy-1`` - (default) A model to be used + via prediction calls to AutoML API. Expected to have a + higher latency, but should also have a higher prediction + quality than other models. + - ``cloud-low-latency-1`` - A model to be used via + prediction calls to AutoML API. Expected to have low + latency, but may have lower prediction quality than other + models. + - ``mobile-low-latency-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have low latency, but may have + lower prediction quality than other models. + - ``mobile-versatile-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. + - ``mobile-high-accuracy-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have a higher latency, but should + also have a higher prediction quality than other models. + node_count (int): + Output only. The number of nodes this model is deployed on. + A node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the qps_per_node + field. + node_qps (float): + Output only. An approximate number of online + prediction QPS that can be supported by this + model per each node on which it is deployed. + stop_reason (str): + Output only. The reason that this create model operation + stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. + train_budget_milli_node_hours (int): + Optional. The train budget of creating this model, expressed + in milli node hours i.e. 1,000 value in this field means 1 + node hour. The actual ``train_cost`` will be equal or less + than this value. If further model training ceases to provide + any improvements, it will stop without using full budget and + the stop_reason will be ``MODEL_CONVERGED``. Note, node_hour + = actual_hour \* number_of_nodes_invovled. For model type + ``cloud-high-accuracy-1``\ (default) and + ``cloud-low-latency-1``, the train budget must be between + 20,000 and 900,000 milli node hours, inclusive. The default + value is 216, 000 which represents one day in wall time. For + model type ``mobile-low-latency-1``, ``mobile-versatile-1``, + ``mobile-high-accuracy-1``, + ``mobile-core-ml-low-latency-1``, + ``mobile-core-ml-versatile-1``, + ``mobile-core-ml-high-accuracy-1``, the train budget must be + between 1,000 and 100,000 milli node hours, inclusive. The + default value is 24, 000 which represents one day in wall + time. + train_cost_milli_node_hours (int): + Output only. The actual train cost of + creating this model, expressed in milli node + hours, i.e. 1,000 value in this field means 1 + node hour. Guaranteed to not exceed the train + budget. 
+ """ + + model_type: str = proto.Field( + proto.STRING, + number=1, + ) + node_count: int = proto.Field( + proto.INT64, + number=3, + ) + node_qps: float = proto.Field( + proto.DOUBLE, + number=4, + ) + stop_reason: str = proto.Field( + proto.STRING, + number=5, + ) + train_budget_milli_node_hours: int = proto.Field( + proto.INT64, + number=6, + ) + train_cost_milli_node_hours: int = proto.Field( + proto.INT64, + number=7, + ) + + +class ImageClassificationModelDeploymentMetadata(proto.Message): + r"""Model deployment metadata specific to Image Classification. + + Attributes: + node_count (int): + Input only. The number of nodes to deploy the model on. A + node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the model's + [node_qps][google.cloud.automl.v1.ImageClassificationModelMetadata.node_qps]. + Must be between 1 and 100, inclusive on both ends. + """ + + node_count: int = proto.Field( + proto.INT64, + number=1, + ) + + +class ImageObjectDetectionModelDeploymentMetadata(proto.Message): + r"""Model deployment metadata specific to Image Object Detection. + + Attributes: + node_count (int): + Input only. The number of nodes to deploy the model on. A + node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the model's + [qps_per_node][google.cloud.automl.v1.ImageObjectDetectionModelMetadata.qps_per_node]. + Must be between 1 and 100, inclusive on both ends. + """ + + node_count: int = proto.Field( + proto.INT64, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/io.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/io.py new file mode 100644 index 00000000..72e0972d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/io.py @@ -0,0 +1,1572 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'InputConfig', + 'BatchPredictInputConfig', + 'DocumentInputConfig', + 'OutputConfig', + 'BatchPredictOutputConfig', + 'ModelExportOutputConfig', + 'GcsSource', + 'GcsDestination', + }, +) + + +class InputConfig(proto.Message): + r"""Input configuration for + [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] + action. + + The format of input depends on dataset_metadata the Dataset into + which the import is happening has. As input source the + [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is + expected, unless specified otherwise. Additionally any input .CSV + file by itself must be 100MB or smaller, unless specified otherwise. + If an "example" file (that is, image, video etc.) with identical + content (even if it had different ``GCS_FILE_PATH``) is mentioned + multiple times, then its label, bounding boxes etc. are appended. 
+ The same file should be always provided with the same ``ML_USE`` and + ``GCS_FILE_PATH``, if it is not, then these values are + nondeterministically selected from the given ones. + + The formats are represented in EBNF with commas being literal and + with non-terminal symbols defined near the end of this comment. The + formats are: + + .. raw:: html + +

AutoML Vision

+ + .. raw:: html + +
Classification
+ + See `Preparing your training + data `__ for + more information. + + CSV file(s) with each line in format: + + :: + + ML_USE,GCS_FILE_PATH,LABEL,LABEL,... + + - ``ML_USE`` - Identifies the data set that the current row (file) + applies to. This value can be one of the following: + + - ``TRAIN`` - Rows in this file are used to train the model. + - ``TEST`` - Rows in this file are used to test the model during + training. + - ``UNASSIGNED`` - Rows in this file are not categorized. They + are Automatically divided into train and test data. 80% for + training and 20% for testing. + + - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image + of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, + .WEBP, .BMP, .TIFF, .ICO. + + - ``LABEL`` - A label that identifies the object in the image. + + For the ``MULTICLASS`` classification type, at most one ``LABEL`` is + allowed per image. If an image has not yet been labeled, then it + should be mentioned just once with no ``LABEL``. + + Some sample rows: + + :: + + TRAIN,gs://folder/image1.jpg,daisy + TEST,gs://folder/image2.jpg,dandelion,tulip,rose + UNASSIGNED,gs://folder/image3.jpg,daisy + UNASSIGNED,gs://folder/image4.jpg + + .. raw:: html + +
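As a quick illustration of the layout above, such a manifest can be produced with the standard ``csv`` module; this is only a sketch, and the bucket, file names and labels are placeholders rather than anything defined by this package:

::

    import csv

    # Rows follow the documented ML_USE,GCS_FILE_PATH,LABEL,... layout.
    rows = [
        ("TRAIN", "gs://my-bucket/flowers/img1.jpg", "daisy"),
        ("TEST", "gs://my-bucket/flowers/img2.jpg", "tulip"),
        ("UNASSIGNED", "gs://my-bucket/flowers/img3.jpg", "rose"),
    ]

    with open("import.csv", "w", newline="") as handle:
        csv.writer(handle).writerows(rows)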
Object Detection
+ See [Preparing your training + data](https://cloud.google.com/vision/automl/object-detection/docs/prepare) + for more information. + + A CSV file(s) with each line in format: + + :: + + ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,) + + - ``ML_USE`` - Identifies the data set that the current row (file) + applies to. This value can be one of the following: + + - ``TRAIN`` - Rows in this file are used to train the model. + - ``TEST`` - Rows in this file are used to test the model during + training. + - ``UNASSIGNED`` - Rows in this file are not categorized. They + are Automatically divided into train and test data. 80% for + training and 20% for testing. + + - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image + of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. + Each image is assumed to be exhaustively labeled. + + - ``LABEL`` - A label that identifies the object in the image + specified by the ``BOUNDING_BOX``. + + - ``BOUNDING BOX`` - The vertices of an object in the example + image. The minimum allowed ``BOUNDING_BOX`` edge length is 0.01, + and no more than 500 ``BOUNDING_BOX`` instances per image are + allowed (one ``BOUNDING_BOX`` per line). If an image has no + looked for objects then it should be mentioned just once with no + LABEL and the ",,,,,,," in place of the ``BOUNDING_BOX``. + + **Four sample rows:** + + :: + + TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, + TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, + UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 + TEST,gs://folder/im3.png,,,,,,,,, + + .. raw:: html + +
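Once a manifest in one of the formats above sits in Cloud Storage, the types in this module wire together roughly as follows. This is a hedged sketch only: it assumes the ``google-cloud-automl`` client and credentials are configured, the project, location, dataset, model and bucket names are placeholders, and the deploy step assumes ``DeployModelRequest`` accepts the classification deployment metadata defined earlier in this module:

::

    from google.cloud import automl_v1

    client = automl_v1.AutoMlClient()
    parent = "projects/my-project/locations/us-central1"
    dataset_name = f"{parent}/datasets/my-dataset-id"

    # Point ImportData at the CSV manifest described above.
    input_config = automl_v1.InputConfig(
        gcs_source=automl_v1.GcsSource(input_uris=["gs://my-bucket/import.csv"]),
    )
    client.import_data(
        request=automl_v1.ImportDataRequest(
            name=dataset_name, input_config=input_config
        )
    ).result()

    # Train an image classification model; the budget is expressed in
    # milli node hours (1,000 = one node hour).
    model = automl_v1.Model(
        display_name="flowers_model",
        dataset_id="my-dataset-id",
        image_classification_model_metadata=automl_v1.ImageClassificationModelMetadata(
            train_budget_milli_node_hours=24000,
        ),
    )
    created_model = client.create_model(
        request=automl_v1.CreateModelRequest(parent=parent, model=model)
    ).result()

    # Deploy on a single node so the model can serve online predictions.
    client.deploy_model(
        request=automl_v1.DeployModelRequest(
            name=created_model.name,
            image_classification_model_deployment_metadata=(
                automl_v1.ImageClassificationModelDeploymentMetadata(node_count=1)
            ),
        )
    ).result()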
+
+ + .. raw:: html + +

AutoML Video Intelligence

+ + .. raw:: html + +
Classification
+ + See `Preparing your training + data `__ + for more information. + + CSV file(s) with each line in format: + + :: + + ML_USE,GCS_FILE_PATH + + For ``ML_USE``, do not use ``VALIDATE``. + + ``GCS_FILE_PATH`` is the path to another .csv file that describes + training example for a given ``ML_USE``, using the following row + format: + + :: + + GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) + + Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up + to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + + ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the + length of the video, and the end time must be after the start time. + Any segment of a video which has one or more labels on it, is + considered a hard negative for all other labels. Any segment with no + labels on it is considered to be unknown. If a whole video is + unknown, then it should be mentioned just once with ",," in place of + ``LABEL, TIME_SEGMENT_START,TIME_SEGMENT_END``. + + Sample top level CSV file: + + :: + + TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv + + Sample rows of a CSV file for a particular ML_USE: + + :: + + gs://folder/video1.avi,car,120,180.000021 + gs://folder/video1.avi,bike,150,180.000021 + gs://folder/vid2.avi,car,0,60.5 + gs://folder/vid3.avi,,, + + .. raw:: html + +
Object Tracking
+ + See `Preparing your training + data `__ + for more information. + + CSV file(s) with each line in format: + + :: + + ML_USE,GCS_FILE_PATH + + For ``ML_USE``, do not use ``VALIDATE``. + + ``GCS_FILE_PATH`` is the path to another .csv file that describes + training example for a given ``ML_USE``, using the following row + format: + + :: + + GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX + + or + + :: + + GCS_FILE_PATH,,,,,,,,,, + + Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up + to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + Providing ``INSTANCE_ID``\ s can help to obtain a better model. When + a specific labeled entity leaves the video frame, and shows up + afterwards it is not required, albeit preferable, that the same + ``INSTANCE_ID`` is given to it. + + ``TIMESTAMP`` must be within the length of the video, the + ``BOUNDING_BOX`` is assumed to be drawn on the closest video's frame + to the ``TIMESTAMP``. Any mentioned by the ``TIMESTAMP`` frame is + expected to be exhaustively labeled and no more than 500 + ``BOUNDING_BOX``-es per frame are allowed. If a whole video is + unknown, then it should be mentioned just once with ",,,,,,,,,," in + place of ``LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX``. + + Sample top level CSV file: + + :: + + TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv + + Seven sample rows of a CSV file for a particular ML_USE: + + :: + + gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 + gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 + gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 + gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, + gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, + gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, + gs://folder/video2.avi,,,,,,,,,,, + + .. raw:: html + +
+
+ + .. raw:: html + +

AutoML Natural Language

+ + .. raw:: html + +
Entity Extraction
+ + See `Preparing your training + data `__ for + more information. + + One or more CSV file(s) with each line in the following format: + + :: + + ML_USE,GCS_FILE_PATH + + - ``ML_USE`` - Identifies the data set that the current row (file) + applies to. This value can be one of the following: + + - ``TRAIN`` - Rows in this file are used to train the model. + - ``TEST`` - Rows in this file are used to test the model during + training. + - ``UNASSIGNED`` - Rows in this file are not categorized. They + are Automatically divided into train and test data. 80% for + training and 20% for testing.. + + - ``GCS_FILE_PATH`` - a Identifies JSON Lines (.JSONL) file stored + in Google Cloud Storage that contains in-line text in-line as + documents for model training. + + After the training data set has been determined from the ``TRAIN`` + and ``UNASSIGNED`` CSV files, the training data is divided into + train and validation data sets. 70% for training and 30% for + validation. + + For example: + + :: + + TRAIN,gs://folder/file1.jsonl + VALIDATE,gs://folder/file2.jsonl + TEST,gs://folder/file3.jsonl + + **In-line JSONL files** + + In-line .JSONL files contain, per line, a JSON document that wraps a + [``text_snippet``][google.cloud.automl.v1.TextSnippet] field + followed by one or more + [``annotations``][google.cloud.automl.v1.AnnotationPayload] fields, + which have ``display_name`` and ``text_extraction`` fields to + describe the entity from the text snippet. Multiple JSON documents + can be separated using line breaks (\n). + + The supplied text must be annotated exhaustively. For example, if + you include the text "horse", but do not label it as "animal", then + "horse" is assumed to not be an "animal". + + Any given text snippet content must have 30,000 characters or less, + and also be UTF-8 NFC encoded. ASCII is accepted as it is UTF-8 NFC + encoded. + + For example: + + :: + + { + "text_snippet": { + "content": "dog car cat" + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 0, "end_offset": 2} + } + }, + { + "display_name": "vehicle", + "text_extraction": { + "text_segment": {"start_offset": 4, "end_offset": 6} + } + }, + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 8, "end_offset": 10} + } + } + ] + }\n + { + "text_snippet": { + "content": "This dog is good." + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 5, "end_offset": 7} + } + } + ] + } + + **JSONL files that reference documents** + + .JSONL files contain, per line, a JSON document that wraps a + ``input_config`` that contains the path to a source document. + Multiple JSON documents can be separated using line breaks (\n). + + Supported document extensions: .PDF, .TIF, .TIFF + + For example: + + :: + + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] + } + } + } + }\n + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] + } + } + } + } + + **In-line JSONL files with document layout information** + + **Note:** You can only annotate documents using the UI. The format + described below applies to annotated documents exported using the UI + or ``exportData``. + + In-line .JSONL files for documents contain, per line, a JSON + document that wraps a ``document`` field that provides the textual + content of the document and the layout information. 
+ + For example: + + :: + + { + "document": { + "document_text": { + "content": "dog car cat" + } + "layout": [ + { + "text_segment": { + "start_offset": 0, + "end_offset": 11, + }, + "page_number": 1, + "bounding_poly": { + "normalized_vertices": [ + {"x": 0.1, "y": 0.1}, + {"x": 0.1, "y": 0.3}, + {"x": 0.3, "y": 0.3}, + {"x": 0.3, "y": 0.1}, + ], + }, + "text_segment_type": TOKEN, + } + ], + "document_dimensions": { + "width": 8.27, + "height": 11.69, + "unit": INCH, + } + "page_count": 3, + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 0, "end_offset": 3} + } + }, + { + "display_name": "vehicle", + "text_extraction": { + "text_segment": {"start_offset": 4, "end_offset": 7} + } + }, + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 8, "end_offset": 11} + } + }, + ], + + .. raw:: html + +
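For the plain in-line text variant shown earlier, one such training line can be emitted with the standard ``json`` module; the snippet text and offsets below simply mirror the example above and are placeholders:

::

    import json

    record = {
        "text_snippet": {"content": "dog car cat"},
        "annotations": [
            {
                "display_name": "animal",
                "text_extraction": {
                    "text_segment": {"start_offset": 0, "end_offset": 2}
                },
            },
            {
                "display_name": "vehicle",
                "text_extraction": {
                    "text_segment": {"start_offset": 4, "end_offset": 6}
                },
            },
        ],
    }

    # One JSON document per line, as the JSONL format requires.
    with open("train_items.jsonl", "w", encoding="utf-8") as handle:
        handle.write(json.dumps(record) + "\n")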
Classification
+ + See `Preparing your training + data `__ + for more information. + + One or more CSV file(s) with each line in the following format: + + :: + + ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... + + - ``ML_USE`` - Identifies the data set that the current row (file) + applies to. This value can be one of the following: + + - ``TRAIN`` - Rows in this file are used to train the model. + - ``TEST`` - Rows in this file are used to test the model during + training. + - ``UNASSIGNED`` - Rows in this file are not categorized. They + are Automatically divided into train and test data. 80% for + training and 20% for testing. + + - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a + pattern. If the column content is a valid Google Cloud Storage + file path, that is, prefixed by "gs://", it is treated as a + ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in + double quotes (""), it is treated as a ``TEXT_SNIPPET``. For + ``GCS_FILE_PATH``, the path must lead to a file with supported + extension and UTF-8 encoding, for example, + "gs://folder/content.txt" AutoML imports the file content as a + text snippet. For ``TEXT_SNIPPET``, AutoML imports the column + content excluding quotes. In both cases, size of the content must + be 10MB or less in size. For zip files, the size of each file + inside the zip must be 10MB or less in size. + + For the ``MULTICLASS`` classification type, at most one ``LABEL`` + is allowed. + + The ``ML_USE`` and ``LABEL`` columns are optional. Supported file + extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP + + A maximum of 100 unique labels are allowed per CSV row. + + Sample rows: + + :: + + TRAIN,"They have bad food and very rude",RudeService,BadFood + gs://folder/content.txt,SlowService + TEST,gs://folder/document.pdf + VALIDATE,gs://folder/text_files.zip,BadFood + + .. raw:: html + +
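A small sketch of writing rows in this layout; the quoted second column is recognised as a ``TEXT_SNIPPET`` while a value starting with ``gs://`` is treated as a ``GCS_FILE_PATH`` (the review text, bucket and labels are placeholders):

::

    lines = [
        'TRAIN,"They have bad food and very rude",RudeService,BadFood',
        "TEST,gs://my-bucket/reviews/review1.txt,SlowService",
    ]

    # Plain text is enough here; only the in-line snippet needs double quotes.
    with open("text_import.csv", "w", encoding="utf-8") as handle:
        handle.write("\n".join(lines) + "\n")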
Sentiment Analysis
+ + See `Preparing your training + data `__ + for more information. + + CSV file(s) with each line in format: + + :: + + ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT + + - ``ML_USE`` - Identifies the data set that the current row (file) + applies to. This value can be one of the following: + + - ``TRAIN`` - Rows in this file are used to train the model. + - ``TEST`` - Rows in this file are used to test the model during + training. + - ``UNASSIGNED`` - Rows in this file are not categorized. They + are Automatically divided into train and test data. 80% for + training and 20% for testing. + + - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a + pattern. If the column content is a valid Google Cloud Storage + file path, that is, prefixed by "gs://", it is treated as a + ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in + double quotes (""), it is treated as a ``TEXT_SNIPPET``. For + ``GCS_FILE_PATH``, the path must lead to a file with supported + extension and UTF-8 encoding, for example, + "gs://folder/content.txt" AutoML imports the file content as a + text snippet. For ``TEXT_SNIPPET``, AutoML imports the column + content excluding quotes. In both cases, size of the content must + be 128kB or less in size. For zip files, the size of each file + inside the zip must be 128kB or less in size. + + The ``ML_USE`` and ``SENTIMENT`` columns are optional. Supported + file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP + + - ``SENTIMENT`` - An integer between 0 and + Dataset.text_sentiment_dataset_metadata.sentiment_max + (inclusive). Describes the ordinal of the sentiment - higher + value means a more positive sentiment. All the values are + completely relative, i.e. neither 0 needs to mean a negative or + neutral sentiment nor sentiment_max needs to mean a positive one + - it is just required that 0 is the least positive sentiment in + the data, and sentiment_max is the most positive one. The + SENTIMENT shouldn't be confused with "score" or "magnitude" from + the previous Natural Language Sentiment Analysis API. All + SENTIMENT values between 0 and sentiment_max must be represented + in the imported data. On prediction the same 0 to sentiment_max + range will be used. The difference between neighboring sentiment + values needs not to be uniform, e.g. 1 and 2 may be similar + whereas the difference between 2 and 3 may be large. + + Sample rows: + + :: + + TRAIN,"@freewrytin this is way too good for your product",2 + gs://folder/content.txt,3 + TEST,gs://folder/document.pdf + VALIDATE,gs://folder/text_files.zip,2 + + .. raw:: html + +
+
+ + .. raw:: html + +

AutoML Tables

+ + See `Preparing your training + data `__ for + more information. + + You can use either + [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or + [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source]. + All input is concatenated into a single + [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id] + + **For gcs_source:** + + CSV file(s), where the first row of the first file is the header, + containing unique column names. If the first row of a subsequent + file is the same as the header, then it is also treated as a header. + All other rows contain values for the corresponding columns. + + Each .CSV file by itself must be 10GB or smaller, and their total + size must be 100GB or smaller. + + First three sample rows of a CSV file: + + .. raw:: html + +
+        "Id","First Name","Last Name","Dob","Addresses"
+        "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+        "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+        
+ + **For bigquery_source:** + + An URI of a BigQuery table. The user data size of the BigQuery table + must be 100GB or smaller. + + An imported table must have between 2 and 1,000 columns, inclusive, + and between 1000 and 100,000,000 rows, inclusive. There are at most + 5 import data running in parallel. + + .. raw:: html + +
+
+ + **Input field definitions:** + + ``ML_USE`` : ("TRAIN" \| "VALIDATE" \| "TEST" \| "UNASSIGNED") + Describes how the given example (file) should be used for model + training. "UNASSIGNED" can be used when user has no preference. + + ``GCS_FILE_PATH`` : The path to a file on Google Cloud Storage. For + example, "gs://folder/image1.png". + + ``LABEL`` : A display name of an object on an image, video etc., + e.g. "dog". Must be up to 32 characters long and can consist only of + ASCII Latin letters A-Z and a-z, underscores(_), and ASCII digits + 0-9. For each label an AnnotationSpec is created which display_name + becomes the label; AnnotationSpecs are given back in predictions. + + ``INSTANCE_ID`` : A positive integer that identifies a specific + instance of a labeled entity on an example. Used e.g. to track two + cars on a video while being able to tell apart which one is which. + + ``BOUNDING_BOX`` : (``VERTEX,VERTEX,VERTEX,VERTEX`` \| + ``VERTEX,,,VERTEX,,``) A rectangle parallel to the frame of the + example (image, video). If 4 vertices are given they are connected + by edges in the order provided, if 2 are given they are recognized + as diagonally opposite vertices of the rectangle. + + ``VERTEX`` : (``COORDINATE,COORDINATE``) First coordinate is + horizontal (x), the second is vertical (y). + + ``COORDINATE`` : A float in 0 to 1 range, relative to total length + of image or video in given dimension. For fractions the leading + non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is in top + left. + + ``TIME_SEGMENT_START`` : (``TIME_OFFSET``) Expresses a beginning, + inclusive, of a time segment within an example that has a time + dimension (e.g. video). + + ``TIME_SEGMENT_END`` : (``TIME_OFFSET``) Expresses an end, + exclusive, of a time segment within n example that has a time + dimension (e.g. video). + + ``TIME_OFFSET`` : A number of seconds as measured from the start of + an example (e.g. video). Fractions are allowed, up to a microsecond + precision. "inf" is allowed, and it means the end of the example. + + ``TEXT_SNIPPET`` : The content of a text snippet, UTF-8 encoded, + enclosed within double quotes (""). + + ``DOCUMENT`` : A field that provides the textual content with + document and the layout information. + + **Errors:** + + If any of the provided CSV files can't be parsed or if more than + certain percent of CSV rows cannot be processed then the operation + fails and nothing is imported. Regardless of overall success or + failure the per-row failures, up to a certain count cap, is listed + in Operation.metadata.partial_failures. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_source (google.cloud.automl_v1.types.GcsSource): + The Google Cloud Storage location for the input content. For + [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], + ``gcs_source`` points to a CSV file with a structure + described in + [InputConfig][google.cloud.automl.v1.InputConfig]. + + This field is a member of `oneof`_ ``source``. + params (MutableMapping[str, str]): + Additional domain-specific parameters describing the + semantic of the imported data, any string must be up to + 25000 characters long. + + .. raw:: html + +

AutoML Tables

+ + ``schema_inference_version`` : (integer) This value must be + supplied. The version of the algorithm to use for the + initial inference of the column data types of the imported + table. Allowed values: "1". + """ + + gcs_source: 'GcsSource' = proto.Field( + proto.MESSAGE, + number=1, + oneof='source', + message='GcsSource', + ) + params: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + + +class BatchPredictInputConfig(proto.Message): + r"""Input configuration for BatchPredict Action. + + The format of input depends on the ML problem of the model used for + prediction. As input source the + [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is + expected, unless specified otherwise. + + The formats are represented in EBNF with commas being literal and + with non-terminal symbols defined near the end of this comment. The + formats are: + + .. raw:: html + +

AutoML Vision

+
Classification
+ + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH + + The Google Cloud Storage location of an image of up to 30MB in size. + Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the + ID in the batch predict output. + + Sample rows: + + :: + + gs://folder/image1.jpeg + gs://folder/image2.gif + gs://folder/image3.png + + .. raw:: html + +
Object Detection
+ + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH + + The Google Cloud Storage location of an image of up to 30MB in size. + Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the + ID in the batch predict output. + + Sample rows: + + :: + + gs://folder/image1.jpeg + gs://folder/image2.gif + gs://folder/image3.png + + .. raw:: html + +
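A hedged sketch of submitting such a CSV of image paths for batch prediction, assuming the manifest has already been uploaded to Cloud Storage; the project, model ID and bucket paths are placeholders:

::

    from google.cloud import automl_v1

    prediction_client = automl_v1.PredictionServiceClient()
    model_name = "projects/my-project/locations/us-central1/models/my-model-id"

    request = automl_v1.BatchPredictRequest(
        name=model_name,
        input_config=automl_v1.BatchPredictInputConfig(
            gcs_source=automl_v1.GcsSource(
                input_uris=["gs://my-bucket/batch/images.csv"],
            ),
        ),
        output_config=automl_v1.BatchPredictOutputConfig(
            gcs_destination=automl_v1.GcsDestination(
                output_uri_prefix="gs://my-bucket/batch/results/",
            ),
        ),
    )

    # Batch prediction is a long-running operation; result() blocks until done.
    prediction_client.batch_predict(request=request).result()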
+
+ + .. raw:: html + +

AutoML Video Intelligence

+
Classification
+ + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END + + ``GCS_FILE_PATH`` is the Google Cloud Storage location of a video up + to 50GB in size and up to 3h in duration. Supported + extensions: .MOV, .MPEG4, .MP4, .AVI. + + ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the + length of the video, and the end time must be after the start time. + + Sample rows: + + :: + + gs://folder/video1.mp4,10,40 + gs://folder/video1.mp4,20,60 + gs://folder/vid2.mov,0,inf + + .. raw:: html + +
Object Tracking
+ + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END + + ``GCS_FILE_PATH`` is the Google Cloud Storage location of a video up + to 50GB in size and up to 3h in duration. Supported + extensions: .MOV, .MPEG4, .MP4, .AVI. + + ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the + length of the video, and the end time must be after the start time. + + Sample rows: + + :: + + gs://folder/video1.mp4,10,40 + gs://folder/video1.mp4,20,60 + gs://folder/vid2.mov,0,inf + + .. raw:: html + +
+
+ + .. raw:: html + +

AutoML Natural Language

+
Classification
+ + One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH + + ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text + file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF + + Text files can be no larger than 10MB in size. + + Sample rows: + + :: + + gs://folder/text1.txt + gs://folder/text2.pdf + gs://folder/text3.tif + + .. raw:: html + +
Sentiment Analysis
+ One or more CSV files where each line is a single column: + + :: + + GCS_FILE_PATH + + ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text + file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF + + Text files can be no larger than 128kB in size. + + Sample rows: + + :: + + gs://folder/text1.txt + gs://folder/text2.pdf + gs://folder/text3.tif + + .. raw:: html + +
Entity Extraction
+ + One or more JSONL (JSON Lines) files that either provide inline text + or documents. You can only use one format, either inline text or + documents, for a single call to [AutoMl.BatchPredict]. + + Each JSONL file contains a per line a proto that wraps a temporary + user-assigned TextSnippet ID (string up to 2000 characters long) + called "id", a TextSnippet proto (in JSON representation) and zero + or more TextFeature protos. Any given text snippet content must have + 30,000 characters or less, and also be UTF-8 NFC encoded (ASCII + already is). The IDs provided should be unique. + + Each document JSONL file contains, per line, a proto that wraps a + Document proto with ``input_config`` set. Each document cannot + exceed 2MB in size. + + Supported document extensions: .PDF, .TIF, .TIFF + + Each JSONL file must not exceed 100MB in size, and no more than 20 + JSONL files may be passed. + + Sample inline JSONL file (Shown with artificial line breaks. Actual + line breaks are denoted by "\n".): + + :: + + { + "id": "my_first_id", + "text_snippet": { "content": "dog car cat"}, + "text_features": [ + { + "text_segment": {"start_offset": 4, "end_offset": 6}, + "structural_type": PARAGRAPH, + "bounding_poly": { + "normalized_vertices": [ + {"x": 0.1, "y": 0.1}, + {"x": 0.1, "y": 0.3}, + {"x": 0.3, "y": 0.3}, + {"x": 0.3, "y": 0.1}, + ] + }, + } + ], + }\n + { + "id": "2", + "text_snippet": { + "content": "Extended sample content", + "mime_type": "text/plain" + } + } + + Sample document JSONL file (Shown with artificial line breaks. + Actual line breaks are denoted by "\n".): + + :: + + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] + } + } + } + }\n + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] + } + } + } + } + + .. raw:: html + +
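The in-line variant of this input can be generated with the standard ``json`` module; a minimal sketch, with made-up IDs and snippet text:

::

    import json

    snippets = {
        "review-1": "The pizza was cold but the service was quick.",
        "review-2": "Great battery life, disappointing camera.",
    }

    with open("batch_items.jsonl", "w", encoding="utf-8") as handle:
        for snippet_id, content in snippets.items():
            line = {
                # User-assigned ID, up to 2000 characters and ideally unique.
                "id": snippet_id,
                "text_snippet": {"content": content},
            }
            handle.write(json.dumps(line) + "\n")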
+
+ + .. raw:: html + +

AutoML Tables

+ + See `Preparing your training + data `__ + for more information. + + You can use either + [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source] + or [bigquery_source][BatchPredictInputConfig.bigquery_source]. + + **For gcs_source:** + + CSV file(s), each by itself 10GB or smaller and total size must be + 100GB or smaller, where first file must have a header containing + column names. If the first row of a subsequent file is the same as + the header, then it is also treated as a header. All other rows + contain values for the corresponding columns. + + The column names must contain the model's + [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] + [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] + (order doesn't matter). The columns corresponding to the model's + input feature column specs must contain values compatible with the + column spec's data types. Prediction on all the rows, i.e. the CSV + lines, will be attempted. + + Sample rows from a CSV file: + + .. raw:: html + +
+        "First Name","Last Name","Dob","Addresses"
+        "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+        "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+        
+ + **For bigquery_source:** + + The URI of a BigQuery table. The user data size of the BigQuery + table must be 100GB or smaller. + + The column names must contain the model's + [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] + [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] + (order doesn't matter). The columns corresponding to the model's + input feature column specs must contain values compatible with the + column spec's data types. Prediction on all the rows of the table + will be attempted. + + .. raw:: html + +
+
+ + **Input field definitions:** + + ``GCS_FILE_PATH`` : The path to a file on Google Cloud Storage. For + example, "gs://folder/video.avi". + + ``TIME_SEGMENT_START`` : (``TIME_OFFSET``) Expresses a beginning, + inclusive, of a time segment within an example that has a time + dimension (e.g. video). + + ``TIME_SEGMENT_END`` : (``TIME_OFFSET``) Expresses an end, + exclusive, of a time segment within n example that has a time + dimension (e.g. video). + + ``TIME_OFFSET`` : A number of seconds as measured from the start of + an example (e.g. video). Fractions are allowed, up to a microsecond + precision. "inf" is allowed, and it means the end of the example. + + **Errors:** + + If any of the provided CSV files can't be parsed or if more than + certain percent of CSV rows cannot be processed then the operation + fails and prediction does not happen. Regardless of overall success + or failure the per-row failures, up to a certain count cap, will be + listed in Operation.metadata.partial_failures. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_source (google.cloud.automl_v1.types.GcsSource): + Required. The Google Cloud Storage location + for the input content. + + This field is a member of `oneof`_ ``source``. + """ + + gcs_source: 'GcsSource' = proto.Field( + proto.MESSAGE, + number=1, + oneof='source', + message='GcsSource', + ) + + +class DocumentInputConfig(proto.Message): + r"""Input configuration of a + [Document][google.cloud.automl.v1.Document]. + + Attributes: + gcs_source (google.cloud.automl_v1.types.GcsSource): + The Google Cloud Storage location of the + document file. Only a single path should be + given. + + Max supported size: 512MB. + + Supported extensions: .PDF. + """ + + gcs_source: 'GcsSource' = proto.Field( + proto.MESSAGE, + number=1, + message='GcsSource', + ) + + +class OutputConfig(proto.Message): + r"""- For Translation: CSV file ``translation.csv``, with each line in + format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file + which describes examples that have given ML_USE, using the + following row format per line: TEXT_SNIPPET (in source language) + \\t TEXT_SNIPPET (in target language) + + - For Tables: Output depends on whether the dataset was imported + from Google Cloud Storage or BigQuery. Google Cloud Storage + case: + [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination] + must be set. Exported are CSV file(s) ``tables_1.csv``, + ``tables_2.csv``,...,\ ``tables_N.csv`` with each having as + header line the table's column names, and all other lines + contain values for the header columns. BigQuery case: + [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] + pointing to a BigQuery project must be set. In the given + project a new dataset will be created with name + ``export_data__`` + where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp + will be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" + format. In that dataset a new table called ``primary_table`` + will be created, and filled with precisely the same data as + this obtained on import. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_destination (google.cloud.automl_v1.types.GcsDestination): + Required. The Google Cloud Storage location where the output + is to be written to. 
For Image Object Detection, Text + Extraction, Video Classification and Tables, in the given + directory a new directory will be created with name: + export_data-- where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ + ISO-8601 format. All export output will be written into that + directory. + + This field is a member of `oneof`_ ``destination``. + """ + + gcs_destination: 'GcsDestination' = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message='GcsDestination', + ) + + +class BatchPredictOutputConfig(proto.Message): + r"""Output configuration for BatchPredict Action. + + As destination the + [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination] + must be set unless specified otherwise for a domain. If + gcs_destination is set then in the given directory a new directory + is created. Its name will be "prediction--", where timestamp is in + YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends + on the ML problem the predictions are made for. + + - For Image Classification: In the created directory files + ``image_classification_1.jsonl``, + ``image_classification_2.jsonl``,...,\ ``image_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of the successfully predicted images and annotations. A + single image will be listed only once with all its annotations, + and its annotations will never be split across files. Each .JSONL + file will contain, per line, a JSON representation of a proto + that wraps image's "ID" : "" followed by a list of zero + or more AnnotationPayload protos (called annotations), which have + classification detail populated. If prediction for any image + failed (partially or completely), then an additional + ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` + files will be created (N depends on total number of failed + predictions). These files will have a JSON representation of a + proto that wraps the same "ID" : "" but here followed + by exactly one + ```google.rpc.Status`` `__ + containing only ``code`` and ``message``\ fields. + + - For Image Object Detection: In the created directory files + ``image_object_detection_1.jsonl``, + ``image_object_detection_2.jsonl``,...,\ ``image_object_detection_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of the successfully predicted images and annotations. Each + .JSONL file will contain, per line, a JSON representation of a + proto that wraps image's "ID" : "" followed by a list + of zero or more AnnotationPayload protos (called annotations), + which have image_object_detection detail populated. A single + image will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for + any image failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` + files will be created (N depends on total number of failed + predictions). These files will have a JSON representation of a + proto that wraps the same "ID" : "" but here followed + by exactly one + ```google.rpc.Status`` `__ + containing only ``code`` and ``message``\ fields. + + - For Video Classification: In the created directory a + video_classification.csv file, and a .JSON file per each video + classification requested in the input (i.e. each line in given + CSV(s)), will be created. 
+ + :: + + The format of video_classification.csv is: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 + the prediction input lines (i.e. video_classification.csv has + precisely the same number of lines as the prediction input had.) + JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. + STATUS = "OK" if prediction completed successfully, or an error code + with message otherwise. If STATUS is not "OK" then the .JSON file + for that line may not exist or be empty. + + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for the video time segment the file is assigned to in the + video_classification.csv. All AnnotationPayload protos will have + video_classification field set, and will be sorted by + video_classification.type field (note that the returned types are + governed by `classifaction_types` parameter in + [PredictService.BatchPredictRequest.params][]). + + - For Video Object Tracking: In the created directory a + video_object_tracking.csv file will be created, and multiple + files video_object_trackinng_1.json, + video_object_trackinng_2.json,..., video_object_trackinng_N.json, + where N is the number of requests in the input (i.e. the number + of lines in given CSV(s)). + + :: + + The format of video_object_tracking.csv is: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 + the prediction input lines (i.e. video_object_tracking.csv has + precisely the same number of lines as the prediction input had.) + JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. + STATUS = "OK" if prediction completed successfully, or an error + code with message otherwise. If STATUS is not "OK" then the .JSON + file for that line may not exist or be empty. + + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for each frame of the video time segment the file is assigned to in + video_object_tracking.csv. All AnnotationPayload protos will have + video_object_tracking field set. + + - For Text Classification: In the created directory files + ``text_classification_1.jsonl``, + ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of inputs and annotations found. + + :: + + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text file (or document) in + the text snippet (or document) proto and a list of + zero or more AnnotationPayload protos (called annotations), which + have classification detail populated. A single text file (or + document) will be listed only once with all its annotations, and its + annotations will never be split across files. + + If prediction for any input file (or document) failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). 
These files will have a JSON representation of a + proto that wraps input file followed by exactly one + [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only `code` and `message`. + + - For Text Sentiment: In the created directory files + ``text_sentiment_1.jsonl``, + ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will + be created, where N may be 1, and depends on the total number of + inputs and annotations found. + + :: + + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text file (or document) in + the text snippet (or document) proto and a list of + zero or more AnnotationPayload protos (called annotations), which + have text_sentiment detail populated. A single text file (or + document) will be listed only once with all its annotations, and its + annotations will never be split across files. + + If prediction for any input file (or document) failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input file followed by exactly one + [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only `code` and `message`. + + - For Text Extraction: In the created directory files + ``text_extraction_1.jsonl``, + ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of inputs and annotations found. The contents of these + .JSONL file(s) depend on whether the input used inline text, or + documents. If input was inline, then each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + given in request text snippet's "id" (if specified), followed by + input text snippet, and a list of zero or more AnnotationPayload + protos (called annotations), which have text_extraction detail + populated. A single text snippet will be listed only once with + all its annotations, and its annotations will never be split + across files. If input used documents, then each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + given in request document proto, followed by its OCR-ed + representation in the form of a text snippet, finally followed by + a list of zero or more AnnotationPayload protos (called + annotations), which have text_extraction detail populated and + refer, via their indices, to the OCR-ed text snippet. A single + document (and its text snippet) will be listed only once with all + its annotations, and its annotations will never be split across + files. If prediction for any text snippet failed (partially or + completely), then additional ``errors_1.jsonl``, + ``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created + (N depends on total number of failed predictions). These files + will have a JSON representation of a proto that wraps either the + "id" : "" (in case of inline) or the document proto (in + case of document) but here followed by exactly one + ```google.rpc.Status`` `__ + containing only ``code`` and ``message``. 
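Once the operation finishes, the per-line JSON in these result files can be read back with the standard ``json`` module; a rough sketch, assuming one results file has already been copied down from the output directory (the file name is a placeholder):

::

    import json

    with open("text_extraction_1.jsonl", "r", encoding="utf-8") as handle:
        for line in handle:
            result = json.loads(line)
            # Each entry in "annotations" is an AnnotationPayload in JSON form.
            for annotation in result.get("annotations", []):
                print(annotation)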
+ + - For Tables: Output depends on whether + [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination] + or + [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination] + is set (either is allowed). Google Cloud Storage case: In the + created directory files ``tables_1.csv``, ``tables_2.csv``,..., + ``tables_N.csv`` will be created, where N may be 1, and depends + on the total number of the successfully predicted rows. For all + CLASSIFICATION + [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns' + [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] + given on input followed by M target column names in the format of + "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>*\ score" + where M is the number of distinct target values, i.e. number of + distinct values in the target column of the table used to train + the model. Subsequent lines will contain the respective values of + successfully predicted rows, with the last, i.e. the target, + columns having the corresponding prediction + [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. + For REGRESSION and FORECASTING + [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns' + [display_name-s][google.cloud.automl.v1p1beta.display_name] given + on input followed by the predicted target column with name in the + format of + "predicted\ <[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" + Subsequent lines will contain the respective values of + successfully predicted rows, with the last, i.e. the target, + column having the predicted target value. If prediction for any + rows failed, then an additional ``errors_1.csv``, + ``errors_2.csv``,..., ``errors_N.csv`` will be created (N depends + on total number of failed rows). These files will have analogous + format as ``tables_*.csv``, but always with a single target + column + having*\ ```google.rpc.Status`` `__\ *represented + as a JSON string, and containing only ``code`` and ``message``. + BigQuery case: + [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] + pointing to a BigQuery project must be set. In the given project + a new dataset will be created with name + ``prediction__`` + where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will + be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the + dataset two tables will be created, ``predictions``, and + ``errors``. 
The ``predictions`` table's column names will be the + input columns' + [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] + followed by the target column with name in the format of + "predicted*\ <[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" + The input feature columns will contain the respective values of + successfully predicted rows, with the target column having an + ARRAY of + [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload], + represented as STRUCT-s, containing + [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation]. + The ``errors`` table contains rows for which the prediction has + failed, it has analogous input columns while the target column + name is in the format of + "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>", + and as a value has + ```google.rpc.Status`` `__ + represented as a STRUCT, and containing only ``code`` and + ``message``. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_destination (google.cloud.automl_v1.types.GcsDestination): + Required. The Google Cloud Storage location + of the directory where the output is to be + written to. + + This field is a member of `oneof`_ ``destination``. + """ + + gcs_destination: 'GcsDestination' = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message='GcsDestination', + ) + + +class ModelExportOutputConfig(proto.Message): + r"""Output configuration for ModelExport Action. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_destination (google.cloud.automl_v1.types.GcsDestination): + Required. The Google Cloud Storage location where the model + is to be written to. This location may only be set for the + following model formats: "tflite", "edgetpu_tflite", + "tf_saved_model", "tf_js", "core_ml". + + Under the directory given as the destination a new one with + name "model-export--", where timestamp is in + YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. + Inside the model and any of its supporting files will be + written. + + This field is a member of `oneof`_ ``destination``. + model_format (str): + The format in which the model must be exported. The + available, and default, formats depend on the problem and + model type (if given problem and type combination doesn't + have a format listed, it means its models are not + exportable): + + - For Image Classification mobile-low-latency-1, + mobile-versatile-1, mobile-high-accuracy-1: "tflite" + (default), "edgetpu_tflite", "tf_saved_model", "tf_js", + "docker". + + - For Image Classification mobile-core-ml-low-latency-1, + mobile-core-ml-versatile-1, + mobile-core-ml-high-accuracy-1: "core_ml" (default). + + - For Image Object Detection mobile-low-latency-1, + mobile-versatile-1, mobile-high-accuracy-1: "tflite", + "tf_saved_model", "tf_js". Formats description: + + - tflite - Used for Android mobile devices. + + - edgetpu_tflite - Used for `Edge + TPU `__ devices. + + - tf_saved_model - A tensorflow model in SavedModel format. + + - tf_js - A + `TensorFlow.js `__ model + that can be used in the browser and in Node.js using + JavaScript. + + - docker - Used for Docker containers. 
Use the params field + to customize the container. The container is verified to + work correctly on ubuntu 16.04 operating system. See more + at `containers + quickstart `__ + + - core_ml - Used for iOS mobile devices. + params (MutableMapping[str, str]): + Additional model-type and format specific parameters + describing the requirements for the to be exported model + files, any string must be up to 25000 characters long. + + - For ``docker`` format: ``cpu_architecture`` - (string) + "x86_64" (default). ``gpu_architecture`` - (string) + "none" (default), "nvidia". + """ + + gcs_destination: 'GcsDestination' = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message='GcsDestination', + ) + model_format: str = proto.Field( + proto.STRING, + number=4, + ) + params: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + + +class GcsSource(proto.Message): + r"""The Google Cloud Storage location for the input content. + + Attributes: + input_uris (MutableSequence[str]): + Required. Google Cloud Storage URIs to input files, up to + 2000 characters long. Accepted forms: + + - Full object path, e.g. gs://bucket/directory/object.csv + """ + + input_uris: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class GcsDestination(proto.Message): + r"""The Google Cloud Storage location where the output is to be + written to. + + Attributes: + output_uri_prefix (str): + Required. Google Cloud Storage URI to output directory, up + to 2000 characters long. Accepted forms: + + - Prefix path: gs://bucket/directory The requesting user + must have write permission to the bucket. The directory + is created if it doesn't exist. + """ + + output_uri_prefix: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/model.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/model.py new file mode 100644 index 00000000..ed64311b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/model.py @@ -0,0 +1,201 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import image +from google.cloud.automl_v1.types import text +from google.cloud.automl_v1.types import translation +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'Model', + }, +) + + +class Model(proto.Message): + r"""API proto representing a trained machine learning model. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + translation_model_metadata (google.cloud.automl_v1.types.TranslationModelMetadata): + Metadata for translation models. + + This field is a member of `oneof`_ ``model_metadata``. + image_classification_model_metadata (google.cloud.automl_v1.types.ImageClassificationModelMetadata): + Metadata for image classification models. + + This field is a member of `oneof`_ ``model_metadata``. + text_classification_model_metadata (google.cloud.automl_v1.types.TextClassificationModelMetadata): + Metadata for text classification models. + + This field is a member of `oneof`_ ``model_metadata``. + image_object_detection_model_metadata (google.cloud.automl_v1.types.ImageObjectDetectionModelMetadata): + Metadata for image object detection models. + + This field is a member of `oneof`_ ``model_metadata``. + text_extraction_model_metadata (google.cloud.automl_v1.types.TextExtractionModelMetadata): + Metadata for text extraction models. + + This field is a member of `oneof`_ ``model_metadata``. + text_sentiment_model_metadata (google.cloud.automl_v1.types.TextSentimentModelMetadata): + Metadata for text sentiment models. + + This field is a member of `oneof`_ ``model_metadata``. + name (str): + Output only. Resource name of the model. Format: + ``projects/{project_id}/locations/{location_id}/models/{model_id}`` + display_name (str): + Required. The name of the model to show in the interface. + The name can be up to 32 characters long and can consist + only of ASCII Latin letters A-Z and a-z, underscores (_), + and ASCII digits 0-9. It must start with a letter. + dataset_id (str): + Required. The resource ID of the dataset used + to create the model. The dataset must come from + the same ancestor project and location. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the model + training finished and can be used for + prediction. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this model was + last updated. + deployment_state (google.cloud.automl_v1.types.Model.DeploymentState): + Output only. Deployment state of the model. A + model can only serve prediction requests after + it gets deployed. + etag (str): + Used to perform a consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + labels (MutableMapping[str, str]): + Optional. The labels with user-defined + metadata to organize your model. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. Label values are optional. Label + keys must start with a letter. + + See https://goo.gl/xmQnxf for more information + on and examples of labels. + """ + class DeploymentState(proto.Enum): + r"""Deployment state of the model. + + Values: + DEPLOYMENT_STATE_UNSPECIFIED (0): + Should not be used, an un-set enum has this + value by default. + DEPLOYED (1): + Model is deployed. + UNDEPLOYED (2): + Model is not deployed. 
+ """ + DEPLOYMENT_STATE_UNSPECIFIED = 0 + DEPLOYED = 1 + UNDEPLOYED = 2 + + translation_model_metadata: translation.TranslationModelMetadata = proto.Field( + proto.MESSAGE, + number=15, + oneof='model_metadata', + message=translation.TranslationModelMetadata, + ) + image_classification_model_metadata: image.ImageClassificationModelMetadata = proto.Field( + proto.MESSAGE, + number=13, + oneof='model_metadata', + message=image.ImageClassificationModelMetadata, + ) + text_classification_model_metadata: text.TextClassificationModelMetadata = proto.Field( + proto.MESSAGE, + number=14, + oneof='model_metadata', + message=text.TextClassificationModelMetadata, + ) + image_object_detection_model_metadata: image.ImageObjectDetectionModelMetadata = proto.Field( + proto.MESSAGE, + number=20, + oneof='model_metadata', + message=image.ImageObjectDetectionModelMetadata, + ) + text_extraction_model_metadata: text.TextExtractionModelMetadata = proto.Field( + proto.MESSAGE, + number=19, + oneof='model_metadata', + message=text.TextExtractionModelMetadata, + ) + text_sentiment_model_metadata: text.TextSentimentModelMetadata = proto.Field( + proto.MESSAGE, + number=22, + oneof='model_metadata', + message=text.TextSentimentModelMetadata, + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + dataset_id: str = proto.Field( + proto.STRING, + number=3, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + deployment_state: DeploymentState = proto.Field( + proto.ENUM, + number=8, + enum=DeploymentState, + ) + etag: str = proto.Field( + proto.STRING, + number=10, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=34, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/model_evaluation.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/model_evaluation.py new file mode 100644 index 00000000..ea4aca07 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/model_evaluation.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import classification +from google.cloud.automl_v1.types import detection +from google.cloud.automl_v1.types import text_extraction +from google.cloud.automl_v1.types import text_sentiment +from google.cloud.automl_v1.types import translation +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'ModelEvaluation', + }, +) + + +class ModelEvaluation(proto.Message): + r"""Evaluation results of a model. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + classification_evaluation_metrics (google.cloud.automl_v1.types.ClassificationEvaluationMetrics): + Model evaluation metrics for image, text, + video and tables classification. + Tables problem is considered a classification + when the target column is CATEGORY DataType. + + This field is a member of `oneof`_ ``metrics``. + translation_evaluation_metrics (google.cloud.automl_v1.types.TranslationEvaluationMetrics): + Model evaluation metrics for translation. + + This field is a member of `oneof`_ ``metrics``. + image_object_detection_evaluation_metrics (google.cloud.automl_v1.types.ImageObjectDetectionEvaluationMetrics): + Model evaluation metrics for image object + detection. + + This field is a member of `oneof`_ ``metrics``. + text_sentiment_evaluation_metrics (google.cloud.automl_v1.types.TextSentimentEvaluationMetrics): + Evaluation metrics for text sentiment models. + + This field is a member of `oneof`_ ``metrics``. + text_extraction_evaluation_metrics (google.cloud.automl_v1.types.TextExtractionEvaluationMetrics): + Evaluation metrics for text extraction + models. + + This field is a member of `oneof`_ ``metrics``. + name (str): + Output only. Resource name of the model evaluation. Format: + ``projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}`` + annotation_spec_id (str): + Output only. The ID of the annotation spec that the model + evaluation applies to. The ID is empty for the overall + model evaluation. For Tables, annotation specs in the dataset + do not exist and this ID is always not set, but for + CLASSIFICATION + [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type] + the + [display_name][google.cloud.automl.v1.ModelEvaluation.display_name] + field is used. + display_name (str): + Output only. The value of + [display_name][google.cloud.automl.v1.AnnotationSpec.display_name] + at the moment when the model was trained. Because this field + returns a value at model training time, for different models + trained from the same dataset, the values may differ, since + display names could have been changed between the two models' + trainings. For Tables CLASSIFICATION + [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type] + distinct values of the target column at the moment of the + model evaluation are populated here. The display_name is + empty for the overall model evaluation. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only.
Timestamp when this model + evaluation was created. + evaluated_example_count (int): + Output only. The number of examples used for model + evaluation, i.e. for which ground truth from time of model + creation is compared against the predicted annotations + created by the model. For overall ModelEvaluation (i.e. with + annotation_spec_id not set) this is the total number of all + examples used for evaluation. Otherwise, this is the count + of examples that according to the ground truth were + annotated by the + [annotation_spec_id][google.cloud.automl.v1.ModelEvaluation.annotation_spec_id]. + """ + + classification_evaluation_metrics: classification.ClassificationEvaluationMetrics = proto.Field( + proto.MESSAGE, + number=8, + oneof='metrics', + message=classification.ClassificationEvaluationMetrics, + ) + translation_evaluation_metrics: translation.TranslationEvaluationMetrics = proto.Field( + proto.MESSAGE, + number=9, + oneof='metrics', + message=translation.TranslationEvaluationMetrics, + ) + image_object_detection_evaluation_metrics: detection.ImageObjectDetectionEvaluationMetrics = proto.Field( + proto.MESSAGE, + number=12, + oneof='metrics', + message=detection.ImageObjectDetectionEvaluationMetrics, + ) + text_sentiment_evaluation_metrics: text_sentiment.TextSentimentEvaluationMetrics = proto.Field( + proto.MESSAGE, + number=11, + oneof='metrics', + message=text_sentiment.TextSentimentEvaluationMetrics, + ) + text_extraction_evaluation_metrics: text_extraction.TextExtractionEvaluationMetrics = proto.Field( + proto.MESSAGE, + number=13, + oneof='metrics', + message=text_extraction.TextExtractionEvaluationMetrics, + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + annotation_spec_id: str = proto.Field( + proto.STRING, + number=2, + ) + display_name: str = proto.Field( + proto.STRING, + number=15, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + evaluated_example_count: int = proto.Field( + proto.INT32, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/operations.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/operations.py new file mode 100644 index 00000000..84fab23a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/operations.py @@ -0,0 +1,330 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import io +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'OperationMetadata', + 'DeleteOperationMetadata', + 'DeployModelOperationMetadata', + 'UndeployModelOperationMetadata', + 'CreateDatasetOperationMetadata', + 'CreateModelOperationMetadata', + 'ImportDataOperationMetadata', + 'ExportDataOperationMetadata', + 'BatchPredictOperationMetadata', + 'ExportModelOperationMetadata', + }, +) + + +class OperationMetadata(proto.Message): + r"""Metadata used across all long running operations returned by + AutoML API. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + delete_details (google.cloud.automl_v1.types.DeleteOperationMetadata): + Details of a Delete operation. + + This field is a member of `oneof`_ ``details``. + deploy_model_details (google.cloud.automl_v1.types.DeployModelOperationMetadata): + Details of a DeployModel operation. + + This field is a member of `oneof`_ ``details``. + undeploy_model_details (google.cloud.automl_v1.types.UndeployModelOperationMetadata): + Details of an UndeployModel operation. + + This field is a member of `oneof`_ ``details``. + create_model_details (google.cloud.automl_v1.types.CreateModelOperationMetadata): + Details of CreateModel operation. + + This field is a member of `oneof`_ ``details``. + create_dataset_details (google.cloud.automl_v1.types.CreateDatasetOperationMetadata): + Details of CreateDataset operation. + + This field is a member of `oneof`_ ``details``. + import_data_details (google.cloud.automl_v1.types.ImportDataOperationMetadata): + Details of ImportData operation. + + This field is a member of `oneof`_ ``details``. + batch_predict_details (google.cloud.automl_v1.types.BatchPredictOperationMetadata): + Details of BatchPredict operation. + + This field is a member of `oneof`_ ``details``. + export_data_details (google.cloud.automl_v1.types.ExportDataOperationMetadata): + Details of ExportData operation. + + This field is a member of `oneof`_ ``details``. + export_model_details (google.cloud.automl_v1.types.ExportModelOperationMetadata): + Details of ExportModel operation. + + This field is a member of `oneof`_ ``details``. + progress_percent (int): + Output only. Progress of operation. Range: [0, 100]. Not + used currently. + partial_failures (MutableSequence[google.rpc.status_pb2.Status]): + Output only. Partial failures encountered. + E.g. single files that couldn't be read. + This field should never exceed 20 entries. + Status details field will contain standard GCP + error details. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the operation was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the operation was + updated for the last time. 
+ """ + + delete_details: 'DeleteOperationMetadata' = proto.Field( + proto.MESSAGE, + number=8, + oneof='details', + message='DeleteOperationMetadata', + ) + deploy_model_details: 'DeployModelOperationMetadata' = proto.Field( + proto.MESSAGE, + number=24, + oneof='details', + message='DeployModelOperationMetadata', + ) + undeploy_model_details: 'UndeployModelOperationMetadata' = proto.Field( + proto.MESSAGE, + number=25, + oneof='details', + message='UndeployModelOperationMetadata', + ) + create_model_details: 'CreateModelOperationMetadata' = proto.Field( + proto.MESSAGE, + number=10, + oneof='details', + message='CreateModelOperationMetadata', + ) + create_dataset_details: 'CreateDatasetOperationMetadata' = proto.Field( + proto.MESSAGE, + number=30, + oneof='details', + message='CreateDatasetOperationMetadata', + ) + import_data_details: 'ImportDataOperationMetadata' = proto.Field( + proto.MESSAGE, + number=15, + oneof='details', + message='ImportDataOperationMetadata', + ) + batch_predict_details: 'BatchPredictOperationMetadata' = proto.Field( + proto.MESSAGE, + number=16, + oneof='details', + message='BatchPredictOperationMetadata', + ) + export_data_details: 'ExportDataOperationMetadata' = proto.Field( + proto.MESSAGE, + number=21, + oneof='details', + message='ExportDataOperationMetadata', + ) + export_model_details: 'ExportModelOperationMetadata' = proto.Field( + proto.MESSAGE, + number=22, + oneof='details', + message='ExportModelOperationMetadata', + ) + progress_percent: int = proto.Field( + proto.INT32, + number=13, + ) + partial_failures: MutableSequence[status_pb2.Status] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=status_pb2.Status, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteOperationMetadata(proto.Message): + r"""Details of operations that perform deletes of any entities. + """ + + +class DeployModelOperationMetadata(proto.Message): + r"""Details of DeployModel operation. + """ + + +class UndeployModelOperationMetadata(proto.Message): + r"""Details of UndeployModel operation. + """ + + +class CreateDatasetOperationMetadata(proto.Message): + r"""Details of CreateDataset operation. + """ + + +class CreateModelOperationMetadata(proto.Message): + r"""Details of CreateModel operation. + """ + + +class ImportDataOperationMetadata(proto.Message): + r"""Details of ImportData operation. + """ + + +class ExportDataOperationMetadata(proto.Message): + r"""Details of ExportData operation. + + Attributes: + output_info (google.cloud.automl_v1.types.ExportDataOperationMetadata.ExportDataOutputInfo): + Output only. Information further describing + this export data's output. + """ + + class ExportDataOutputInfo(proto.Message): + r"""Further describes this export data's output. Supplements + [OutputConfig][google.cloud.automl.v1.OutputConfig]. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the exported data + is written. + + This field is a member of `oneof`_ ``output_location``. 
+ """ + + gcs_output_directory: str = proto.Field( + proto.STRING, + number=1, + oneof='output_location', + ) + + output_info: ExportDataOutputInfo = proto.Field( + proto.MESSAGE, + number=1, + message=ExportDataOutputInfo, + ) + + +class BatchPredictOperationMetadata(proto.Message): + r"""Details of BatchPredict operation. + + Attributes: + input_config (google.cloud.automl_v1.types.BatchPredictInputConfig): + Output only. The input config that was given + upon starting this batch predict operation. + output_info (google.cloud.automl_v1.types.BatchPredictOperationMetadata.BatchPredictOutputInfo): + Output only. Information further describing + this batch predict's output. + """ + + class BatchPredictOutputInfo(proto.Message): + r"""Further describes this batch predict's output. Supplements + [BatchPredictOutputConfig][google.cloud.automl.v1.BatchPredictOutputConfig]. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the prediction + output is written. + + This field is a member of `oneof`_ ``output_location``. + """ + + gcs_output_directory: str = proto.Field( + proto.STRING, + number=1, + oneof='output_location', + ) + + input_config: io.BatchPredictInputConfig = proto.Field( + proto.MESSAGE, + number=1, + message=io.BatchPredictInputConfig, + ) + output_info: BatchPredictOutputInfo = proto.Field( + proto.MESSAGE, + number=2, + message=BatchPredictOutputInfo, + ) + + +class ExportModelOperationMetadata(proto.Message): + r"""Details of ExportModel operation. + + Attributes: + output_info (google.cloud.automl_v1.types.ExportModelOperationMetadata.ExportModelOutputInfo): + Output only. Information further describing + the output of this model export. + """ + + class ExportModelOutputInfo(proto.Message): + r"""Further describes the output of model export. Supplements + [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the model will be + exported. + """ + + gcs_output_directory: str = proto.Field( + proto.STRING, + number=1, + ) + + output_info: ExportModelOutputInfo = proto.Field( + proto.MESSAGE, + number=2, + message=ExportModelOutputInfo, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/prediction_service.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/prediction_service.py new file mode 100644 index 00000000..c8dc1db3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/prediction_service.py @@ -0,0 +1,302 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import annotation_payload +from google.cloud.automl_v1.types import data_items +from google.cloud.automl_v1.types import io + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'PredictRequest', + 'PredictResponse', + 'BatchPredictRequest', + 'BatchPredictResult', + }, +) + + +class PredictRequest(proto.Message): + r"""Request message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + + Attributes: + name (str): + Required. Name of the model requested to + serve the prediction. + payload (google.cloud.automl_v1.types.ExamplePayload): + Required. Payload to perform a prediction on. + The payload must match the problem type that the + model was trained to solve. + params (MutableMapping[str, str]): + Additional domain-specific parameters, any string must be up + to 25000 characters long. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When + the model makes predictions for an image, it will only + produce results that have at least this confidence score. + The default is 0.5. + + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects on + the image, it will only produce bounding boxes which have at + least this confidence score. Value in 0 to 1 range, default + is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes returned. The default is 100. The number of + returned bounding boxes might be limited by the server. + + AutoML Tables + + ``feature_importance`` : (boolean) Whether + [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance] + is populated in the returned list of + [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation] + objects. The default is false. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + payload: data_items.ExamplePayload = proto.Field( + proto.MESSAGE, + number=2, + message=data_items.ExamplePayload, + ) + params: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=3, + ) + + +class PredictResponse(proto.Message): + r"""Response message for + [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. + + Attributes: + payload (MutableSequence[google.cloud.automl_v1.types.AnnotationPayload]): + Prediction result. + AutoML Translation and AutoML Natural Language + Sentiment Analysis return precisely one payload. + preprocessed_input (google.cloud.automl_v1.types.ExamplePayload): + The preprocessed example that AutoML actually makes + prediction on. Empty if AutoML does not preprocess the input + example. + + For AutoML Natural Language (Classification, Entity + Extraction, and Sentiment Analysis), if the input is a + document, the recognized text is returned in the + [document_text][google.cloud.automl.v1.Document.document_text] + property. + metadata (MutableMapping[str, str]): + Additional domain-specific prediction response metadata. + + AutoML Vision Object Detection + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes to return per image. 
+ + AutoML Natural Language Sentiment Analysis + + ``sentiment_score`` : (float, deprecated) A value between -1 + and 1, -1 maps to least positive sentiment, while 1 maps to + the most positive one and the higher the score, the more + positive the sentiment in the document is. Yet these values + are relative to the training data, so e.g. if all data was + positive then -1 is also positive (though the least). + ``sentiment_score`` is not the same as "score" and + "magnitude" from Sentiment Analysis in the Natural Language + API. + """ + + payload: MutableSequence[annotation_payload.AnnotationPayload] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=annotation_payload.AnnotationPayload, + ) + preprocessed_input: data_items.ExamplePayload = proto.Field( + proto.MESSAGE, + number=3, + message=data_items.ExamplePayload, + ) + metadata: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + + +class BatchPredictRequest(proto.Message): + r"""Request message for + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + + Attributes: + name (str): + Required. Name of the model requested to + serve the batch prediction. + input_config (google.cloud.automl_v1.types.BatchPredictInputConfig): + Required. The input configuration for batch + prediction. + output_config (google.cloud.automl_v1.types.BatchPredictOutputConfig): + Required. The Configuration specifying where + output predictions should be written. + params (MutableMapping[str, str]): + Additional domain-specific parameters for the predictions, + any string must be up to 25000 characters long. + + AutoML Natural Language Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When + the model makes predictions for a text snippet, it will only + produce results that have at least this confidence score. + The default is 0.5. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When + the model makes predictions for an image, it will only + produce results that have at least this confidence score. + The default is 0.5. + + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects on + the image, it will only produce bounding boxes which have at + least this confidence score. Value in 0 to 1 range, default + is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes returned per image. The default is 100, the + number of bounding boxes returned might be limited by the + server. AutoML Video Intelligence Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When + the model makes predictions for a video, it will only + produce results that have at least this confidence score. + The default is 0.5. + + ``segment_classification`` : (boolean) Set to true to + request segment-level classification. AutoML Video + Intelligence returns labels and their confidence scores for + the entire segment of the video that user specified in the + request configuration. The default is true. + + ``shot_classification`` : (boolean) Set to true to request + shot-level classification. AutoML Video Intelligence + determines the boundaries for each camera shot in the entire + segment of the video that user specified in the request + configuration. AutoML Video Intelligence then returns labels + and their confidence scores for each detected shot, along + with the start and end time of the shot. The default is + false. 
+ + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on training + data, but there are no metrics provided to describe that + quality. + + ``1s_interval_classification`` : (boolean) Set to true to + request classification for a video at one-second intervals. + AutoML Video Intelligence returns labels and their + confidence scores for each second of the entire segment of + the video that user specified in the request configuration. + The default is false. + + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on training + data, but there are no metrics provided to describe that + quality. + + AutoML Video Intelligence Object Tracking + + ``score_threshold`` : (float) When Model detects objects on + video frames, it will only produce bounding boxes which have + at least this confidence score. Value in 0 to 1 range, + default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes returned per image. The default is 100, the + number of bounding boxes returned might be limited by the + server. + + ``min_bounding_box_size`` : (float) Only bounding boxes with + shortest edge at least that long as a relative value of + video frame size are returned. Value in 0 to 1 range. + Default is 0. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + input_config: io.BatchPredictInputConfig = proto.Field( + proto.MESSAGE, + number=3, + message=io.BatchPredictInputConfig, + ) + output_config: io.BatchPredictOutputConfig = proto.Field( + proto.MESSAGE, + number=4, + message=io.BatchPredictOutputConfig, + ) + params: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + + +class BatchPredictResult(proto.Message): + r"""Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of the operation + returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + + Attributes: + metadata (MutableMapping[str, str]): + Additional domain-specific prediction response metadata. + + AutoML Vision Object Detection + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes returned per image. + + AutoML Video Intelligence Object Tracking + + ``max_bounding_box_count`` : (int64) The maximum number of + bounding boxes returned per frame. + """ + + metadata: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/service.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/service.py new file mode 100644 index 00000000..6bb29c51 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/service.py @@ -0,0 +1,621 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import dataset as gca_dataset +from google.cloud.automl_v1.types import image +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import model as gca_model +from google.cloud.automl_v1.types import model_evaluation as gca_model_evaluation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'CreateDatasetRequest', + 'GetDatasetRequest', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'UpdateDatasetRequest', + 'DeleteDatasetRequest', + 'ImportDataRequest', + 'ExportDataRequest', + 'GetAnnotationSpecRequest', + 'CreateModelRequest', + 'GetModelRequest', + 'ListModelsRequest', + 'ListModelsResponse', + 'DeleteModelRequest', + 'UpdateModelRequest', + 'DeployModelRequest', + 'UndeployModelRequest', + 'ExportModelRequest', + 'GetModelEvaluationRequest', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + }, +) + + +class CreateDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. + + Attributes: + parent (str): + Required. The resource name of the project to + create the dataset for. + dataset (google.cloud.automl_v1.types.Dataset): + Required. The dataset to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + dataset: gca_dataset.Dataset = proto.Field( + proto.MESSAGE, + number=2, + message=gca_dataset.Dataset, + ) + + +class GetDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. + + Attributes: + name (str): + Required. The resource name of the dataset to + retrieve. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListDatasetsRequest(proto.Message): + r"""Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + + Attributes: + parent (str): + Required. The resource name of the project + from which to list datasets. + filter (str): + An expression for filtering the results of the request. + + - ``dataset_metadata`` - for existence of the case (e.g. + ``image_classification_dataset_metadata:*``). Some + examples of using the filter are: + + - ``translation_dataset_metadata:*`` --> The dataset has + ``translation_dataset_metadata``. + page_size (int): + Requested page size. Server may return fewer + results than requested. If unspecified, server + will pick a default size. + page_token (str): + A token identifying a page of results for the server to + return Typically obtained via + [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token] + of the previous + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets] + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=3, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDatasetsResponse(proto.Message): + r"""Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. + + Attributes: + datasets (MutableSequence[google.cloud.automl_v1.types.Dataset]): + The datasets read. + next_page_token (str): + A token to retrieve next page of results. 
Pass to + [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + datasets: MutableSequence[gca_dataset.Dataset] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] + + Attributes: + dataset (google.cloud.automl_v1.types.Dataset): + Required. The dataset which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the + resource. + """ + + dataset: gca_dataset.Dataset = proto.Field( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. + + Attributes: + name (str): + Required. The resource name of the dataset to + delete. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ImportDataRequest(proto.Message): + r"""Request message for + [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. + + Attributes: + name (str): + Required. Dataset name. Dataset must already + exist. All imported annotations and examples + will be added. + input_config (google.cloud.automl_v1.types.InputConfig): + Required. The desired input location and its + domain specific semantics, if any. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + input_config: io.InputConfig = proto.Field( + proto.MESSAGE, + number=3, + message=io.InputConfig, + ) + + +class ExportDataRequest(proto.Message): + r"""Request message for + [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. + + Attributes: + name (str): + Required. The resource name of the dataset. + output_config (google.cloud.automl_v1.types.OutputConfig): + Required. The desired output location. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + output_config: io.OutputConfig = proto.Field( + proto.MESSAGE, + number=3, + message=io.OutputConfig, + ) + + +class GetAnnotationSpecRequest(proto.Message): + r"""Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. + + Attributes: + name (str): + Required. The resource name of the annotation + spec to retrieve. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateModelRequest(proto.Message): + r"""Request message for + [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. + + Attributes: + parent (str): + Required. Resource name of the parent project + where the model is being created. + model (google.cloud.automl_v1.types.Model): + Required. The model to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + model: gca_model.Model = proto.Field( + proto.MESSAGE, + number=4, + message=gca_model.Model, + ) + + +class GetModelRequest(proto.Message): + r"""Request message for + [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. + + Attributes: + name (str): + Required. Resource name of the model. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelsRequest(proto.Message): + r"""Request message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + + Attributes: + parent (str): + Required. Resource name of the project, from + which to list the models. + filter (str): + An expression for filtering the results of the request. + + - ``model_metadata`` - for existence of the case (e.g. + ``video_classification_model_metadata:*``). + + - ``dataset_id`` - for = or !=. Some examples of using the + filter are: + + - ``image_classification_model_metadata:*`` --> The model + has ``image_classification_model_metadata``. + + - ``dataset_id=5`` --> The model was created from a dataset + with ID 5. + page_size (int): + Requested page size. + page_token (str): + A token identifying a page of results for the server to + return Typically obtained via + [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token] + of the previous + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=3, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListModelsResponse(proto.Message): + r"""Response message for + [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. + + Attributes: + model (MutableSequence[google.cloud.automl_v1.types.Model]): + List of models in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + model: MutableSequence[gca_model.Model] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteModelRequest(proto.Message): + r"""Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. + + Attributes: + name (str): + Required. Resource name of the model being + deleted. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateModelRequest(proto.Message): + r"""Request message for + [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] + + Attributes: + model (google.cloud.automl_v1.types.Model): + Required. The model which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the + resource. + """ + + model: gca_model.Model = proto.Field( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeployModelRequest(proto.Message): + r"""Request message for + [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + image_object_detection_model_deployment_metadata (google.cloud.automl_v1.types.ImageObjectDetectionModelDeploymentMetadata): + Model deployment metadata specific to Image + Object Detection. + + This field is a member of `oneof`_ ``model_deployment_metadata``. + image_classification_model_deployment_metadata (google.cloud.automl_v1.types.ImageClassificationModelDeploymentMetadata): + Model deployment metadata specific to Image + Classification. + + This field is a member of `oneof`_ ``model_deployment_metadata``. + name (str): + Required. Resource name of the model to + deploy. + """ + + image_object_detection_model_deployment_metadata: image.ImageObjectDetectionModelDeploymentMetadata = proto.Field( + proto.MESSAGE, + number=2, + oneof='model_deployment_metadata', + message=image.ImageObjectDetectionModelDeploymentMetadata, + ) + image_classification_model_deployment_metadata: image.ImageClassificationModelDeploymentMetadata = proto.Field( + proto.MESSAGE, + number=4, + oneof='model_deployment_metadata', + message=image.ImageClassificationModelDeploymentMetadata, + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UndeployModelRequest(proto.Message): + r"""Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. + + Attributes: + name (str): + Required. Resource name of the model to + undeploy. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ExportModelRequest(proto.Message): + r"""Request message for + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an error code + will be returned. + + Attributes: + name (str): + Required. The resource name of the model to + export. + output_config (google.cloud.automl_v1.types.ModelExportOutputConfig): + Required. The desired output location and + configuration. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + output_config: io.ModelExportOutputConfig = proto.Field( + proto.MESSAGE, + number=3, + message=io.ModelExportOutputConfig, + ) + + +class GetModelEvaluationRequest(proto.Message): + r"""Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. + + Attributes: + name (str): + Required. Resource name for the model + evaluation. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelEvaluationsRequest(proto.Message): + r"""Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. + + Attributes: + parent (str): + Required. Resource name of the model to list + the model evaluations for. If modelId is set as + "-", this will list model evaluations from + across all models of the parent location. + filter (str): + Required. An expression for filtering the results of the + request. + + - ``annotation_spec_id`` - for =, != or existence. See + example below for the last. + + Some examples of using the filter are: + + - ``annotation_spec_id!=4`` --> The model evaluation was + done for annotation spec with ID different than 4. + - ``NOT annotation_spec_id:*`` --> The model evaluation was + done for aggregate of all annotation specs. + page_size (int): + Requested page size. + page_token (str): + A token identifying a page of results for the server to + return. 
Typically obtained via + [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token] + of the previous + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=3, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListModelEvaluationsResponse(proto.Message): + r"""Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. + + Attributes: + model_evaluation (MutableSequence[google.cloud.automl_v1.types.ModelEvaluation]): + List of model evaluations in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to the + [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token] + field of a new + [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] + request to obtain that page. + """ + + @property + def raw_page(self): + return self + + model_evaluation: MutableSequence[gca_model_evaluation.ModelEvaluation] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_evaluation.ModelEvaluation, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/text.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/text.py new file mode 100644 index 00000000..0773af68 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/text.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import classification + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'TextClassificationDatasetMetadata', + 'TextClassificationModelMetadata', + 'TextExtractionDatasetMetadata', + 'TextExtractionModelMetadata', + 'TextSentimentDatasetMetadata', + 'TextSentimentModelMetadata', + }, +) + + +class TextClassificationDatasetMetadata(proto.Message): + r"""Dataset metadata for classification. + + Attributes: + classification_type (google.cloud.automl_v1.types.ClassificationType): + Required. Type of the classification problem. + """ + + classification_type: classification.ClassificationType = proto.Field( + proto.ENUM, + number=1, + enum=classification.ClassificationType, + ) + + +class TextClassificationModelMetadata(proto.Message): + r"""Model metadata that is specific to text classification. + + Attributes: + classification_type (google.cloud.automl_v1.types.ClassificationType): + Output only. Classification type of the + dataset used to train this model. 
+ """ + + classification_type: classification.ClassificationType = proto.Field( + proto.ENUM, + number=3, + enum=classification.ClassificationType, + ) + + +class TextExtractionDatasetMetadata(proto.Message): + r"""Dataset metadata that is specific to text extraction + """ + + +class TextExtractionModelMetadata(proto.Message): + r"""Model metadata that is specific to text extraction. + """ + + +class TextSentimentDatasetMetadata(proto.Message): + r"""Dataset metadata for text sentiment. + + Attributes: + sentiment_max (int): + Required. A sentiment is expressed as an integer ordinal, + where higher value means a more positive sentiment. The + range of sentiments that will be used is between 0 and + sentiment_max (inclusive on both ends), and all the values + in the range must be represented in the dataset before a + model can be created. sentiment_max value must be between 1 + and 10 (inclusive). + """ + + sentiment_max: int = proto.Field( + proto.INT32, + number=1, + ) + + +class TextSentimentModelMetadata(proto.Message): + r"""Model metadata that is specific to text sentiment. + """ + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/text_extraction.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/text_extraction.py new file mode 100644 index 00000000..221420c7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/text_extraction.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import text_segment as gca_text_segment + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'TextExtractionAnnotation', + 'TextExtractionEvaluationMetrics', + }, +) + + +class TextExtractionAnnotation(proto.Message): + r"""Annotation for identifying spans of text. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + text_segment (google.cloud.automl_v1.types.TextSegment): + An entity annotation will set this, which is + the part of the original text to which the + annotation pertains. + + This field is a member of `oneof`_ ``annotation``. + score (float): + Output only. A confidence estimate between + 0.0 and 1.0. A higher value means greater + confidence in correctness of the annotation. + """ + + text_segment: gca_text_segment.TextSegment = proto.Field( + proto.MESSAGE, + number=3, + oneof='annotation', + message=gca_text_segment.TextSegment, + ) + score: float = proto.Field( + proto.FLOAT, + number=1, + ) + + +class TextExtractionEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for text extraction problems. + + Attributes: + au_prc (float): + Output only. The Area under precision recall + curve metric. 
+ confidence_metrics_entries (MutableSequence[google.cloud.automl_v1.types.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]): + Output only. Metrics that have confidence + thresholds. Precision-recall curve can be + derived from it. + """ + + class ConfidenceMetricsEntry(proto.Message): + r"""Metrics for a single confidence threshold. + + Attributes: + confidence_threshold (float): + Output only. The confidence threshold value + used to compute the metrics. Only annotations + with score of at least this threshold are + considered to be ones the model would return. + recall (float): + Output only. Recall under the given + confidence threshold. + precision (float): + Output only. Precision under the given + confidence threshold. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + """ + + confidence_threshold: float = proto.Field( + proto.FLOAT, + number=1, + ) + recall: float = proto.Field( + proto.FLOAT, + number=3, + ) + precision: float = proto.Field( + proto.FLOAT, + number=4, + ) + f1_score: float = proto.Field( + proto.FLOAT, + number=5, + ) + + au_prc: float = proto.Field( + proto.FLOAT, + number=1, + ) + confidence_metrics_entries: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=ConfidenceMetricsEntry, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/text_segment.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/text_segment.py new file mode 100644 index 00000000..3d1d4a11 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/text_segment.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'TextSegment', + }, +) + + +class TextSegment(proto.Message): + r"""A contiguous part of a text (string), assuming it has an + UTF-8 NFC encoding. + + Attributes: + content (str): + Output only. The content of the TextSegment. + start_offset (int): + Required. Zero-based character index of the + first character of the text segment (counting + characters from the beginning of the text). + end_offset (int): + Required. Zero-based character index of the first character + past the end of the text segment (counting character from + the beginning of the text). The character at the end_offset + is NOT included in the text segment. 
+ """ + + content: str = proto.Field( + proto.STRING, + number=3, + ) + start_offset: int = proto.Field( + proto.INT64, + number=1, + ) + end_offset: int = proto.Field( + proto.INT64, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/text_sentiment.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/text_sentiment.py new file mode 100644 index 00000000..98289622 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/text_sentiment.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import classification + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'TextSentimentAnnotation', + 'TextSentimentEvaluationMetrics', + }, +) + + +class TextSentimentAnnotation(proto.Message): + r"""Contains annotation details specific to text sentiment. + + Attributes: + sentiment (int): + Output only. The sentiment with the semantic, as given to + the + [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] + when populating the dataset from which the model used for + the prediction had been trained. The sentiment values are + between 0 and + Dataset.text_sentiment_dataset_metadata.sentiment_max + (inclusive), with higher value meaning more positive + sentiment. They are completely relative, i.e. 0 means least + positive sentiment and sentiment_max means the most positive + from the sentiments present in the train data. Therefore + e.g. if train data had only negative sentiment, then + sentiment_max, would be still negative (although least + negative). The sentiment shouldn't be confused with "score" + or "magnitude" from the previous Natural Language Sentiment + Analysis API. + """ + + sentiment: int = proto.Field( + proto.INT32, + number=1, + ) + + +class TextSentimentEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for text sentiment problems. + + Attributes: + precision (float): + Output only. Precision. + recall (float): + Output only. Recall. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + mean_absolute_error (float): + Output only. Mean absolute error. Only set + for the overall model evaluation, not for + evaluation of a single annotation spec. + mean_squared_error (float): + Output only. Mean squared error. Only set for + the overall model evaluation, not for evaluation + of a single annotation spec. + linear_kappa (float): + Output only. Linear weighted kappa. Only set + for the overall model evaluation, not for + evaluation of a single annotation spec. + quadratic_kappa (float): + Output only. Quadratic weighted kappa. Only + set for the overall model evaluation, not for + evaluation of a single annotation spec. 
+ confusion_matrix (google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfusionMatrix): + Output only. Confusion matrix of the + evaluation. Only set for the overall model + evaluation, not for evaluation of a single + annotation spec. + """ + + precision: float = proto.Field( + proto.FLOAT, + number=1, + ) + recall: float = proto.Field( + proto.FLOAT, + number=2, + ) + f1_score: float = proto.Field( + proto.FLOAT, + number=3, + ) + mean_absolute_error: float = proto.Field( + proto.FLOAT, + number=4, + ) + mean_squared_error: float = proto.Field( + proto.FLOAT, + number=5, + ) + linear_kappa: float = proto.Field( + proto.FLOAT, + number=6, + ) + quadratic_kappa: float = proto.Field( + proto.FLOAT, + number=7, + ) + confusion_matrix: classification.ClassificationEvaluationMetrics.ConfusionMatrix = proto.Field( + proto.MESSAGE, + number=8, + message=classification.ClassificationEvaluationMetrics.ConfusionMatrix, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/translation.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/translation.py new file mode 100644 index 00000000..12810026 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/automl_v1/types/translation.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1.types import data_items + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1', + manifest={ + 'TranslationDatasetMetadata', + 'TranslationEvaluationMetrics', + 'TranslationModelMetadata', + 'TranslationAnnotation', + }, +) + + +class TranslationDatasetMetadata(proto.Message): + r"""Dataset metadata that is specific to translation. + + Attributes: + source_language_code (str): + Required. The BCP-47 language code of the + source language. + target_language_code (str): + Required. The BCP-47 language code of the + target language. + """ + + source_language_code: str = proto.Field( + proto.STRING, + number=1, + ) + target_language_code: str = proto.Field( + proto.STRING, + number=2, + ) + + +class TranslationEvaluationMetrics(proto.Message): + r"""Evaluation metrics for the dataset. + + Attributes: + bleu_score (float): + Output only. BLEU score. + base_bleu_score (float): + Output only. BLEU score for base model. + """ + + bleu_score: float = proto.Field( + proto.DOUBLE, + number=1, + ) + base_bleu_score: float = proto.Field( + proto.DOUBLE, + number=2, + ) + + +class TranslationModelMetadata(proto.Message): + r"""Model metadata that is specific to translation. + + Attributes: + base_model (str): + The resource name of the model to use as a baseline to train + the custom model. If unset, we use the default base model + provided by Google Translate. Format: + ``projects/{project_id}/locations/{location_id}/models/{model_id}`` + source_language_code (str): + Output only. 
Inferred from the dataset. + The source language (The BCP-47 language code) + that is used for training. + target_language_code (str): + Output only. The target language (The BCP-47 + language code) that is used for training. + """ + + base_model: str = proto.Field( + proto.STRING, + number=1, + ) + source_language_code: str = proto.Field( + proto.STRING, + number=2, + ) + target_language_code: str = proto.Field( + proto.STRING, + number=3, + ) + + +class TranslationAnnotation(proto.Message): + r"""Annotation details specific to translation. + + Attributes: + translated_content (google.cloud.automl_v1.types.TextSnippet): + Output only . The translated content. + """ + + translated_content: data_items.TextSnippet = proto.Field( + proto.MESSAGE, + number=1, + message=data_items.TextSnippet, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini new file mode 100644 index 00000000..574c5aed --- /dev/null +++ b/owl-bot-staging/v1/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.7 +namespace_packages = True diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py new file mode 100644 index 00000000..42f7a58a --- /dev/null +++ b/owl-bot-staging/v1/noxfile.py @@ -0,0 +1,184 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +ALL_PYTHON = [ + "3.7", + "3.8", + "3.9", + "3.10", + "3.11", +] + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + +BLACK_VERSION = "black==22.3.0" +BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"] +DEFAULT_PYTHON_VERSION = "3.11" + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds" + # exclude update_lower_bounds from default + "docs", + "blacken", + "lint", + "lint_setup_py", +] + +@nox.session(python=ALL_PYTHON) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/automl_v1/', + '--cov=tests/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. 
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=ALL_PYTHON) +def mypy(session): + """Run the type checker.""" + session.install( + 'mypy', + 'types-requests', + 'types-protobuf' + ) + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx==7.0.1", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. + """ + session.install("flake8", BLACK_VERSION) + session.run( + "black", + "--check", + *BLACK_PATHS, + ) + session.run("flake8", "google", "tests", "samples") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def blacken(session): + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + session.run( + "black", + *BLACK_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("docutils", "pygments") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_async.py new file mode 100644 index 00000000..781bfeff --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_CreateDataset_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_create_dataset(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + dataset = automl_v1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_CreateDataset_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_sync.py new file mode 100644 index 00000000..55de89c2 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_CreateDataset_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_create_dataset(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + dataset = automl_v1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_CreateDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_async.py new file mode 100644 index 00000000..6c5b56d9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_CreateModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_create_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.CreateModelRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_CreateModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_sync.py new file mode 100644 index 00000000..38b2d1d9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_CreateModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_create_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.CreateModelRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_CreateModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_async.py new file mode 100644 index 00000000..52a75977 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_DeleteDataset_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_delete_dataset(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_DeleteDataset_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_sync.py new file mode 100644 index 00000000..e24a7e8f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_DeleteDataset_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_delete_dataset(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_DeleteDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_async.py new file mode 100644 index 00000000..ff01ec1e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_DeleteModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_delete_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_DeleteModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_sync.py new file mode 100644 index 00000000..e4ff940c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_DeleteModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_delete_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_DeleteModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_async.py new file mode 100644 index 00000000..5fdcf9eb --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_DeployModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_deploy_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.DeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_DeployModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_sync.py new file mode 100644 index 00000000..2fab8897 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_DeployModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_deploy_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.DeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_DeployModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_async.py new file mode 100644 index 00000000..c5b0913e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ExportData_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_export_data(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + output_config = automl_v1.OutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.ExportDataRequest( + name="name_value", + output_config=output_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_ExportData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_sync.py new file mode 100644 index 00000000..e9687c7a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ExportData_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_export_data(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + output_config = automl_v1.OutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.ExportDataRequest( + name="name_value", + output_config=output_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_ExportData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_async.py new file mode 100644 index 00000000..df0d73fb --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ExportModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_export_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + output_config = automl_v1.ModelExportOutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.ExportModelRequest( + name="name_value", + output_config=output_config, + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_ExportModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_sync.py new file mode 100644 index 00000000..19cca495 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ExportModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_export_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + output_config = automl_v1.ModelExportOutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.ExportModelRequest( + name="name_value", + output_config=output_config, + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_ExportModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_async.py new file mode 100644 index 00000000..ff09b999 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAnnotationSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_GetAnnotationSpec_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_get_annotation_spec(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_annotation_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_GetAnnotationSpec_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_sync.py new file mode 100644 index 00000000..2c75f200 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAnnotationSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_GetAnnotationSpec_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_get_annotation_spec(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_annotation_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_GetAnnotationSpec_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_async.py new file mode 100644 index 00000000..911401d8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_GetDataset_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_get_dataset(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = await client.get_dataset(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_GetDataset_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_sync.py new file mode 100644 index 00000000..990253b5 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_GetDataset_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_get_dataset(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = client.get_dataset(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_GetDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_async.py new file mode 100644 index 00000000..2059f160 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_GetModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_get_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_GetModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_async.py new file mode 100644 index 00000000..9758caa9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_GetModelEvaluation_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_get_model_evaluation(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_GetModelEvaluation_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_sync.py new file mode 100644 index 00000000..0e8000eb --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_GetModelEvaluation_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_get_model_evaluation(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_GetModelEvaluation_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_sync.py new file mode 100644 index 00000000..692326b9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_GetModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_get_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_GetModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_async.py new file mode 100644 index 00000000..6966d258 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ImportData_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_import_data(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + input_config = automl_v1.InputConfig() + input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] + + request = automl_v1.ImportDataRequest( + name="name_value", + input_config=input_config, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_ImportData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_sync.py new file mode 100644 index 00000000..05570793 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ImportData_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_import_data(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + input_config = automl_v1.InputConfig() + input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] + + request = automl_v1.ImportDataRequest( + name="name_value", + input_config=input_config, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_ImportData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_async.py new file mode 100644 index 00000000..1f30e64d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatasets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ListDatasets_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_list_datasets(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END automl_v1_generated_AutoMl_ListDatasets_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_sync.py new file mode 100644 index 00000000..443cf025 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatasets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ListDatasets_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_list_datasets(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END automl_v1_generated_AutoMl_ListDatasets_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_async.py new file mode 100644 index 00000000..2d6b409d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ListModelEvaluations_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_list_model_evaluations(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.ListModelEvaluationsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END automl_v1_generated_AutoMl_ListModelEvaluations_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_sync.py new file mode 100644 index 00000000..4d829cba --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ListModelEvaluations_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_list_model_evaluations(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.ListModelEvaluationsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END automl_v1_generated_AutoMl_ListModelEvaluations_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_async.py new file mode 100644 index 00000000..c6d25d99 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ListModels_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_list_models(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END automl_v1_generated_AutoMl_ListModels_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_sync.py new file mode 100644 index 00000000..5518b988 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_ListModels_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_list_models(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END automl_v1_generated_AutoMl_ListModels_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_async.py new file mode 100644 index 00000000..904728e9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_UndeployModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_undeploy_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.UndeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_UndeployModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_sync.py new file mode 100644 index 00000000..38f1566c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_UndeployModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_undeploy_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.UndeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_UndeployModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_async.py new file mode 100644 index 00000000..a49b5e3c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_UpdateDataset_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_update_dataset(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + dataset = automl_v1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = await client.update_dataset(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_UpdateDataset_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_sync.py new file mode 100644 index 00000000..8422cfae --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_UpdateDataset_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_update_dataset(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + dataset = automl_v1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = client.update_dataset(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_UpdateDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_async.py new file mode 100644 index 00000000..d3d96b92 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_UpdateModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_update_model(): + # Create a client + client = automl_v1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1.UpdateModelRequest( + ) + + # Make the request + response = await client.update_model(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_UpdateModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_sync.py new file mode 100644 index 00000000..8b379db9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_AutoMl_UpdateModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_update_model(): + # Create a client + client = automl_v1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1.UpdateModelRequest( + ) + + # Make the request + response = client.update_model(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_AutoMl_UpdateModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_async.py new file mode 100644 index 00000000..5aecf9df --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_async.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchPredict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_PredictionService_BatchPredict_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_batch_predict(): + # Create a client + client = automl_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + input_config = automl_v1.BatchPredictInputConfig() + input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] + + output_config = automl_v1.BatchPredictOutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.BatchPredictRequest( + name="name_value", + input_config=input_config, + output_config=output_config, + ) + + # Make the request + operation = client.batch_predict(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1_generated_PredictionService_BatchPredict_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_sync.py new file mode 100644 index 00000000..2e45b20a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_sync.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchPredict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_PredictionService_BatchPredict_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_batch_predict(): + # Create a client + client = automl_v1.PredictionServiceClient() + + # Initialize request argument(s) + input_config = automl_v1.BatchPredictInputConfig() + input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] + + output_config = automl_v1.BatchPredictOutputConfig() + output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = automl_v1.BatchPredictRequest( + name="name_value", + input_config=input_config, + output_config=output_config, + ) + + # Make the request + operation = client.batch_predict(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1_generated_PredictionService_BatchPredict_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_async.py new file mode 100644 index 00000000..81f2f562 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Predict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_PredictionService_Predict_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +async def sample_predict(): + # Create a client + client = automl_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + payload = automl_v1.ExamplePayload() + payload.image.image_bytes = b'image_bytes_blob' + + request = automl_v1.PredictRequest( + name="name_value", + payload=payload, + ) + + # Make the request + response = await client.predict(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_PredictionService_Predict_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_sync.py new file mode 100644 index 00000000..4f46921a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Predict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1_generated_PredictionService_Predict_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1 + + +def sample_predict(): + # Create a client + client = automl_v1.PredictionServiceClient() + + # Initialize request argument(s) + payload = automl_v1.ExamplePayload() + payload.image.image_bytes = b'image_bytes_blob' + + request = automl_v1.PredictRequest( + name="name_value", + payload=payload, + ) + + # Make the request + response = client.predict(request=request) + + # Handle the response + print(response) + +# [END automl_v1_generated_PredictionService_Predict_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json b/owl-bot-staging/v1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json new file mode 100644 index 00000000..df593972 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json @@ -0,0 +1,3339 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.cloud.automl.v1", + "version": "v1" + } + ], + "language": "PYTHON", + "name": "google-cloud-automl", + "version": "0.1.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.create_dataset", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.CreateDataset", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "CreateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": "google.cloud.automl_v1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_dataset" + }, + "description": "Sample for CreateDataset", + "file": "automl_v1_generated_auto_ml_create_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_CreateDataset_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_create_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.create_dataset", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.CreateDataset", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "CreateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": 
"google.cloud.automl_v1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_dataset" + }, + "description": "Sample for CreateDataset", + "file": "automl_v1_generated_auto_ml_create_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_CreateDataset_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_create_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.create_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.CreateModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "CreateModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.CreateModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.automl_v1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_model" + }, + "description": "Sample for CreateModel", + "file": "automl_v1_generated_auto_ml_create_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_CreateModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_create_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.create_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.CreateModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "CreateModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.CreateModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.automl_v1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation.Operation", + "shortName": "create_model" + }, + "description": "Sample for CreateModel", + "file": "automl_v1_generated_auto_ml_create_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_CreateModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_create_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.delete_dataset", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.DeleteDataset", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeleteDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_dataset" + }, + "description": "Sample for DeleteDataset", + "file": "automl_v1_generated_auto_ml_delete_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_DeleteDataset_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_delete_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.delete_dataset", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.DeleteDataset", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeleteDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_dataset" + }, + "description": "Sample for DeleteDataset", + "file": "automl_v1_generated_auto_ml_delete_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_DeleteDataset_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": 
"SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_delete_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.delete_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.DeleteModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model" + }, + "description": "Sample for DeleteModel", + "file": "automl_v1_generated_auto_ml_delete_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_DeleteModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_delete_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.delete_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.DeleteModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model" + }, + "description": "Sample for DeleteModel", + "file": "automl_v1_generated_auto_ml_delete_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_DeleteModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_delete_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + 
"fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.deploy_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.DeployModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.DeployModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_model" + }, + "description": "Sample for DeployModel", + "file": "automl_v1_generated_auto_ml_deploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_DeployModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_deploy_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.deploy_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.DeployModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.DeployModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "deploy_model" + }, + "description": "Sample for DeployModel", + "file": "automl_v1_generated_auto_ml_deploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_DeployModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_deploy_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.export_data", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ExportData", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ExportData" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.automl_v1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1.types.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_data" + }, + "description": "Sample for ExportData", + "file": "automl_v1_generated_auto_ml_export_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ExportData_async", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_export_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.export_data", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ExportData", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ExportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1.types.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_data" + }, + "description": "Sample for ExportData", + "file": "automl_v1_generated_auto_ml_export_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ExportData_sync", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_export_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.export_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ExportModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1.types.ModelExportOutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + 
"name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_model" + }, + "description": "Sample for ExportModel", + "file": "automl_v1_generated_auto_ml_export_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ExportModel_async", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_export_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.export_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ExportModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1.types.ModelExportOutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_model" + }, + "description": "Sample for ExportModel", + "file": "automl_v1_generated_auto_ml_export_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ExportModel_sync", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_export_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.get_annotation_spec", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.GetAnnotationSpec", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetAnnotationSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.AnnotationSpec", + "shortName": "get_annotation_spec" + }, + "description": "Sample for GetAnnotationSpec", + "file": 
"automl_v1_generated_auto_ml_get_annotation_spec_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_GetAnnotationSpec_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_get_annotation_spec_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.get_annotation_spec", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.GetAnnotationSpec", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetAnnotationSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.AnnotationSpec", + "shortName": "get_annotation_spec" + }, + "description": "Sample for GetAnnotationSpec", + "file": "automl_v1_generated_auto_ml_get_annotation_spec_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_GetAnnotationSpec_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_get_annotation_spec_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.get_dataset", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.GetDataset", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.Dataset", + "shortName": "get_dataset" + }, + "description": "Sample for GetDataset", + "file": "automl_v1_generated_auto_ml_get_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_GetDataset_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + 
"end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_get_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.get_dataset", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.GetDataset", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.Dataset", + "shortName": "get_dataset" + }, + "description": "Sample for GetDataset", + "file": "automl_v1_generated_auto_ml_get_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_GetDataset_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_get_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.get_model_evaluation", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.GetModelEvaluation", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.ModelEvaluation", + "shortName": "get_model_evaluation" + }, + "description": "Sample for GetModelEvaluation", + "file": "automl_v1_generated_auto_ml_get_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_GetModelEvaluation_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_get_model_evaluation_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", 
+ "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.get_model_evaluation", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.GetModelEvaluation", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.ModelEvaluation", + "shortName": "get_model_evaluation" + }, + "description": "Sample for GetModelEvaluation", + "file": "automl_v1_generated_auto_ml_get_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_GetModelEvaluation_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_get_model_evaluation_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.get_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.GetModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "automl_v1_generated_auto_ml_get_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_GetModel_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_get_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.get_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.GetModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.GetModelRequest" + }, + { + 
"name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "automl_v1_generated_auto_ml_get_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_GetModel_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_get_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.import_data", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ImportData", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ImportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "input_config", + "type": "google.cloud.automl_v1.types.InputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_data" + }, + "description": "Sample for ImportData", + "file": "automl_v1_generated_auto_ml_import_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ImportData_async", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_import_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.import_data", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ImportData", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ImportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "input_config", + "type": "google.cloud.automl_v1.types.InputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + 
"shortName": "import_data" + }, + "description": "Sample for ImportData", + "file": "automl_v1_generated_auto_ml_import_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ImportData_sync", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_import_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.list_datasets", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ListDatasets", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListDatasets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsAsyncPager", + "shortName": "list_datasets" + }, + "description": "Sample for ListDatasets", + "file": "automl_v1_generated_auto_ml_list_datasets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ListDatasets_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_list_datasets_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.list_datasets", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ListDatasets", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListDatasets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsPager", + "shortName": "list_datasets" + }, + "description": "Sample for ListDatasets", + "file": "automl_v1_generated_auto_ml_list_datasets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ListDatasets_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": 
"SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_list_datasets_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ListModelEvaluations", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "filter", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "automl_v1_generated_auto_ml_list_model_evaluations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ListModelEvaluations_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_list_model_evaluations_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ListModelEvaluations", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "filter", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "automl_v1_generated_auto_ml_list_model_evaluations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ListModelEvaluations_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + 
"type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_list_model_evaluations_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.list_models", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ListModels", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelsAsyncPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "automl_v1_generated_auto_ml_list_models_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ListModels_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_list_models_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.list_models", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.ListModels", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelsPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "automl_v1_generated_auto_ml_list_models_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_ListModels_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_list_models_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": 
"AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.undeploy_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.UndeployModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UndeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.UndeployModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_model" + }, + "description": "Sample for UndeployModel", + "file": "automl_v1_generated_auto_ml_undeploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_UndeployModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_undeploy_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.undeploy_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.UndeployModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UndeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.UndeployModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_model" + }, + "description": "Sample for UndeployModel", + "file": "automl_v1_generated_auto_ml_undeploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_UndeployModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_undeploy_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.update_dataset", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.UpdateDataset", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UpdateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.UpdateDatasetRequest" 
+ }, + { + "name": "dataset", + "type": "google.cloud.automl_v1.types.Dataset" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.Dataset", + "shortName": "update_dataset" + }, + "description": "Sample for UpdateDataset", + "file": "automl_v1_generated_auto_ml_update_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_UpdateDataset_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_update_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.update_dataset", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.UpdateDataset", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UpdateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": "google.cloud.automl_v1.types.Dataset" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.Dataset", + "shortName": "update_dataset" + }, + "description": "Sample for UpdateDataset", + "file": "automl_v1_generated_auto_ml_update_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_UpdateDataset_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_update_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.update_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.UpdateModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UpdateModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.UpdateModelRequest" + }, + { + "name": "model", + "type": "google.cloud.automl_v1.types.Model" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + 
"name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.Model", + "shortName": "update_model" + }, + "description": "Sample for UpdateModel", + "file": "automl_v1_generated_auto_ml_update_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_UpdateModel_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_update_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1.AutoMlClient.update_model", + "method": { + "fullName": "google.cloud.automl.v1.AutoMl.UpdateModel", + "service": { + "fullName": "google.cloud.automl.v1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UpdateModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.UpdateModelRequest" + }, + { + "name": "model", + "type": "google.cloud.automl_v1.types.Model" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.Model", + "shortName": "update_model" + }, + "description": "Sample for UpdateModel", + "file": "automl_v1_generated_auto_ml_update_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_AutoMl_UpdateModel_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_auto_ml_update_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.automl_v1.PredictionServiceAsyncClient.batch_predict", + "method": { + "fullName": "google.cloud.automl.v1.PredictionService.BatchPredict", + "service": { + "fullName": "google.cloud.automl.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "BatchPredict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.BatchPredictRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "input_config", + "type": "google.cloud.automl_v1.types.BatchPredictInputConfig" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1.types.BatchPredictOutputConfig" + }, + { + "name": "params", + "type": "MutableMapping[str, str]" + }, + 
{ + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_predict" + }, + "description": "Sample for BatchPredict", + "file": "automl_v1_generated_prediction_service_batch_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_PredictionService_BatchPredict_async", + "segments": [ + { + "end": 63, + "start": 27, + "type": "FULL" + }, + { + "end": 63, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 53, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 60, + "start": 54, + "type": "REQUEST_EXECUTION" + }, + { + "end": 64, + "start": 61, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_prediction_service_batch_predict_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.automl_v1.PredictionServiceClient.batch_predict", + "method": { + "fullName": "google.cloud.automl.v1.PredictionService.BatchPredict", + "service": { + "fullName": "google.cloud.automl.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "BatchPredict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.BatchPredictRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "input_config", + "type": "google.cloud.automl_v1.types.BatchPredictInputConfig" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1.types.BatchPredictOutputConfig" + }, + { + "name": "params", + "type": "MutableMapping[str, str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_predict" + }, + "description": "Sample for BatchPredict", + "file": "automl_v1_generated_prediction_service_batch_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_PredictionService_BatchPredict_sync", + "segments": [ + { + "end": 63, + "start": 27, + "type": "FULL" + }, + { + "end": 63, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 53, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 60, + "start": 54, + "type": "REQUEST_EXECUTION" + }, + { + "end": 64, + "start": 61, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_prediction_service_batch_predict_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.automl_v1.PredictionServiceAsyncClient.predict", + "method": { + "fullName": "google.cloud.automl.v1.PredictionService.Predict", + "service": { + "fullName": "google.cloud.automl.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Predict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.PredictRequest" + }, + { + "name": "name", 
+ "type": "str" + }, + { + "name": "payload", + "type": "google.cloud.automl_v1.types.ExamplePayload" + }, + { + "name": "params", + "type": "MutableMapping[str, str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.PredictResponse", + "shortName": "predict" + }, + "description": "Sample for Predict", + "file": "automl_v1_generated_prediction_service_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_PredictionService_Predict_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_prediction_service_predict_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.automl_v1.PredictionServiceClient.predict", + "method": { + "fullName": "google.cloud.automl.v1.PredictionService.Predict", + "service": { + "fullName": "google.cloud.automl.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Predict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1.types.PredictRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "payload", + "type": "google.cloud.automl_v1.types.ExamplePayload" + }, + { + "name": "params", + "type": "MutableMapping[str, str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1.types.PredictResponse", + "shortName": "predict" + }, + "description": "Sample for Predict", + "file": "automl_v1_generated_prediction_service_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1_generated_PredictionService_Predict_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1_generated_prediction_service_predict_sync.py" + } + ] +} diff --git a/owl-bot-staging/v1/scripts/fixup_automl_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_automl_v1_keywords.py new file mode 100644 index 00000000..ef6140ff --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_automl_v1_keywords.py @@ -0,0 +1,195 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class automlCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'batch_predict': ('name', 'input_config', 'output_config', 'params', ), + 'create_dataset': ('parent', 'dataset', ), + 'create_model': ('parent', 'model', ), + 'delete_dataset': ('name', ), + 'delete_model': ('name', ), + 'deploy_model': ('name', 'image_object_detection_model_deployment_metadata', 'image_classification_model_deployment_metadata', ), + 'export_data': ('name', 'output_config', ), + 'export_model': ('name', 'output_config', ), + 'get_annotation_spec': ('name', ), + 'get_dataset': ('name', ), + 'get_model': ('name', ), + 'get_model_evaluation': ('name', ), + 'import_data': ('name', 'input_config', ), + 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_models': ('parent', 'filter', 'page_size', 'page_token', ), + 'predict': ('name', 'payload', 'params', ), + 'undeploy_model': ('name', ), + 'update_dataset': ('dataset', 'update_mask', ), + 'update_model': ('model', 'update_mask', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=automlCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the automl client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py new file mode 100644 index 00000000..95b4c8d1 --- /dev/null +++ b/owl-bot-staging/v1/setup.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
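The keyword-fixup script above folds flattened positional and keyword arguments into a single `request` dict, leaving the `retry`/`timeout`/`metadata` control parameters alone. A small sketch of that transformation, driving the transformer directly; it assumes the script is importable as a module and uses a hypothetical `get_dataset` call (that method maps to `('name',)` in `METHOD_TO_PARAMS`):

```python
# Run the transformer above on a one-line fragment (illustrative, not from the patch).
import libcst as cst
from fixup_automl_v1_keywords import automlCallTransformer

before = 'client.get_dataset("projects/p/locations/l/datasets/d", timeout=30.0)\n'
after = cst.parse_module(before).visit(automlCallTransformer()).code
print(after)
# Expected shape: the positional name is folded into request=, timeout stays a kwarg:
# client.get_dataset(request={'name': "projects/p/locations/l/datasets/d"}, timeout=30.0)
```

In practice the script is run through its CLI (`-d`/`--input-directory` and `-o`/`--output-directory`, both required), writing the fixed copies into an existing, empty output directory as the argument checks above enforce.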
+# +import io +import os + +import setuptools # type: ignore + +package_root = os.path.abspath(os.path.dirname(__file__)) + +name = 'google-cloud-automl' + + +description = "Google Cloud Automl API client library" + +version = {} +with open(os.path.join(package_root, 'google/cloud/automl/gapic_version.py')) as fp: + exec(fp.read(), version) +version = version["__version__"] + +if version[0] == "0": + release_status = "Development Status :: 4 - Beta" +else: + release_status = "Development Status :: 5 - Production/Stable" + +dependencies = [ + "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", + "proto-plus >= 1.22.0, <2.0.0dev", + "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", + "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", +] +url = "https://github.com/googleapis/python-automl" + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: + readme = readme_file.read() + +packages = [ + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") +] + +namespaces = ["google", "google.cloud"] + +setuptools.setup( + name=name, + version=version, + description=description, + long_description=readme, + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + url=url, + classifiers=[ + release_status, + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Operating System :: OS Independent", + "Topic :: Internet", + ], + platforms="Posix; MacOS X; Windows", + packages=packages, + python_requires=">=3.7", + namespace_packages=namespaces, + install_requires=dependencies, + include_package_data=True, + zip_safe=False, +) diff --git a/owl-bot-staging/v1/testing/constraints-3.10.txt b/owl-bot-staging/v1/testing/constraints-3.10.txt new file mode 100644 index 00000000..ed7f9aed --- /dev/null +++ b/owl-bot-staging/v1/testing/constraints-3.10.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/v1/testing/constraints-3.11.txt b/owl-bot-staging/v1/testing/constraints-3.11.txt new file mode 100644 index 00000000..ed7f9aed --- /dev/null +++ b/owl-bot-staging/v1/testing/constraints-3.11.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/v1/testing/constraints-3.12.txt b/owl-bot-staging/v1/testing/constraints-3.12.txt new file mode 100644 index 00000000..ed7f9aed --- /dev/null +++ b/owl-bot-staging/v1/testing/constraints-3.12.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. 
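setup.py above reads `__version__` out of `gapic_version.py` with `exec` and picks the PyPI development-status classifier from the leading character of that string. A minimal restatement of that rule, with hypothetical version strings:

```python
# Sketch of the classifier selection performed in setup.py above.
def release_status(version: str) -> str:
    if version[0] == "0":
        return "Development Status :: 4 - Beta"
    return "Development Status :: 5 - Production/Stable"

assert release_status("0.9.1") == "Development Status :: 4 - Beta"
assert release_status("2.11.3") == "Development Status :: 5 - Production/Stable"
```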
+google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/v1/testing/constraints-3.7.txt b/owl-bot-staging/v1/testing/constraints-3.7.txt new file mode 100644 index 00000000..6c44adfe --- /dev/null +++ b/owl-bot-staging/v1/testing/constraints-3.7.txt @@ -0,0 +1,9 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List all library dependencies and extras in this file. +# Pin the version to the lower bound. +# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", +# Then this file should have google-cloud-foo==1.14.0 +google-api-core==1.34.0 +proto-plus==1.22.0 +protobuf==3.19.5 diff --git a/owl-bot-staging/v1/testing/constraints-3.8.txt b/owl-bot-staging/v1/testing/constraints-3.8.txt new file mode 100644 index 00000000..ed7f9aed --- /dev/null +++ b/owl-bot-staging/v1/testing/constraints-3.8.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/v1/testing/constraints-3.9.txt b/owl-bot-staging/v1/testing/constraints-3.9.txt new file mode 100644 index 00000000..ed7f9aed --- /dev/null +++ b/owl-bot-staging/v1/testing/constraints-3.9.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py new file mode 100644 index 00000000..1b4db446 --- /dev/null +++ b/owl-bot-staging/v1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py new file mode 100644 index 00000000..1b4db446 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
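The per-Python-version constraints files above keep the unit-test environment pinned; constraints-3.7.txt in particular pins each dependency to the lower bound declared in setup.py so the oldest supported combination is exercised, while the 3.8–3.12 files deliberately leave versions unpinned. A hedged sketch that cross-checks those pins against the setup.py lower bounds; the file path is an assumption based on the layout added in this patch:

```python
# Verify constraints-3.7.txt pins match the lower bounds declared in setup.py above.
lower_bounds = {
    "google-api-core": "1.34.0",
    "proto-plus": "1.22.0",
    "protobuf": "3.19.5",
}

with open("owl-bot-staging/v1/testing/constraints-3.7.txt") as f:
    pins = dict(
        line.strip().split("==")
        for line in f
        if "==" in line and not line.startswith("#")
    )

for pkg, bound in lower_bounds.items():
    assert pins[pkg] == bound, f"{pkg}: pinned {pins[pkg]}, expected lower bound {bound}"
```

In CI such a file is typically handed to pip as a constraints file (for example `pip install -c testing/constraints-3.7.txt ...`) so the tests run against exactly these versions.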
+# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py new file mode 100644 index 00000000..1b4db446 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/automl_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/automl_v1/__init__.py new file mode 100644 index 00000000..1b4db446 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/automl_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_auto_ml.py b/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_auto_ml.py new file mode 100644 index 00000000..494cd187 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_auto_ml.py @@ -0,0 +1,10997 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
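test_auto_ml.py, whose header begins just above, exercises every AutoMl RPC against a patched transport rather than a live service. A condensed sketch of the recurring pattern the generated tests below rely on (same client, request, and stubbed return types as in those tests):

```python
# Minimal sketch of the transport-mocking pattern used throughout the tests below.
from unittest import mock

from google.auth import credentials as ga_credentials
from google.cloud.automl_v1.services.auto_ml import AutoMlClient
from google.cloud.automl_v1.types import service
from google.longrunning import operations_pb2

client = AutoMlClient(credentials=ga_credentials.AnonymousCredentials())

# Patch the gRPC stub callable and fake its return value.
with mock.patch.object(type(client.transport.create_dataset), '__call__') as call:
    call.return_value = operations_pb2.Operation(name='operations/spam')
    client.create_dataset(request=service.CreateDatasetRequest())

    # The request object forwarded to the stub is what the tests assert on.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == service.CreateDatasetRequest()
```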
+# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.automl_v1.services.auto_ml import AutoMlAsyncClient +from google.cloud.automl_v1.services.auto_ml import AutoMlClient +from google.cloud.automl_v1.services.auto_ml import pagers +from google.cloud.automl_v1.services.auto_ml import transports +from google.cloud.automl_v1.types import annotation_spec +from google.cloud.automl_v1.types import classification +from google.cloud.automl_v1.types import dataset +from google.cloud.automl_v1.types import dataset as gca_dataset +from google.cloud.automl_v1.types import detection +from google.cloud.automl_v1.types import image +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import model +from google.cloud.automl_v1.types import model as gca_model +from google.cloud.automl_v1.types import model_evaluation +from google.cloud.automl_v1.types import operations +from google.cloud.automl_v1.types import service +from google.cloud.automl_v1.types import text +from google.cloud.automl_v1.types import text_extraction +from google.cloud.automl_v1.types import text_sentiment +from google.cloud.automl_v1.types import translation +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AutoMlClient._get_default_mtls_endpoint(None) is None + assert AutoMlClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert AutoMlClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert AutoMlClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert AutoMlClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert AutoMlClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (AutoMlClient, "grpc"), + (AutoMlAsyncClient, "grpc_asyncio"), + (AutoMlClient, "rest"), +]) +def test_auto_ml_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://automl.googleapis.com' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.AutoMlGrpcTransport, "grpc"), + (transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.AutoMlRestTransport, "rest"), +]) +def test_auto_ml_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (AutoMlClient, "grpc"), + (AutoMlAsyncClient, "grpc_asyncio"), + (AutoMlClient, "rest"), +]) +def test_auto_ml_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://automl.googleapis.com' + ) + + +def 
test_auto_ml_client_get_transport_class(): + transport = AutoMlClient.get_transport_class() + available_transports = [ + transports.AutoMlGrpcTransport, + transports.AutoMlRestTransport, + ] + assert transport in available_transports + + transport = AutoMlClient.get_transport_class("grpc") + assert transport == transports.AutoMlGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + (AutoMlClient, transports.AutoMlRestTransport, "rest"), +]) +@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) +@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) +def test_auto_ml_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(AutoMlClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(AutoMlClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions(api_audience="https://language.googleapis.com") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com" + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", "true"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", "false"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", "false"), + (AutoMlClient, transports.AutoMlRestTransport, "rest", "true"), + (AutoMlClient, transports.AutoMlRestTransport, "rest", "false"), +]) +@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) +@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_auto_ml_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [ + AutoMlClient, AutoMlAsyncClient +]) +@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) +@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) +def test_auto_ml_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + (AutoMlClient, transports.AutoMlRestTransport, "rest"), +]) +def test_auto_ml_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", grpc_helpers), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), + (AutoMlClient, transports.AutoMlRestTransport, "rest", None), +]) +def test_auto_ml_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +def test_auto_ml_client_client_options_from_dict(): + with mock.patch('google.cloud.automl_v1.services.auto_ml.transports.AutoMlGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = AutoMlClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", grpc_helpers), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_auto_ml_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "automl.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="automl.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + service.CreateDatasetRequest, + dict, +]) +def test_create_dataset(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + client.create_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateDatasetRequest() + +@pytest.mark.asyncio +async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=service.CreateDatasetRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_dataset_async_from_dict(): + await test_create_dataset_async(request_type=dict) + + +def test_create_dataset_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateDatasetRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_dataset_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateDatasetRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_dataset_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].dataset + mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) + assert arg == mock_val + + +def test_create_dataset_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_dataset( + service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + +@pytest.mark.asyncio +async def test_create_dataset_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].dataset + mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_dataset_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_dataset( + service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetDatasetRequest, + dict, +]) +def test_get_dataset(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + ) + response = client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +def test_get_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + client.get_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetDatasetRequest() + +@pytest.mark.asyncio +async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=service.GetDatasetRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + )) + response = await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_dataset_async_from_dict(): + await test_get_dataset_async(request_type=dict) + + +def test_get_dataset_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = dataset.Dataset() + client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_dataset_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_dataset_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_dataset_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_dataset( + service.GetDatasetRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_dataset_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_dataset_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_dataset( + service.GetDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListDatasetsRequest, + dict, +]) +def test_list_datasets(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatasetsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_datasets_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + client.list_datasets() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListDatasetsRequest() + +@pytest.mark.asyncio +async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=service.ListDatasetsRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatasetsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_datasets_async_from_dict(): + await test_list_datasets_async(request_type=dict) + + +def test_list_datasets_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = service.ListDatasetsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + call.return_value = service.ListDatasetsResponse() + client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_datasets_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListDatasetsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse()) + await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_datasets_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_datasets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_datasets_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_datasets( + service.ListDatasetsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_datasets_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.list_datasets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_datasets_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_datasets( + service.ListDatasetsRequest(), + parent='parent_value', + ) + + +def test_list_datasets_pager(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_datasets(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, dataset.Dataset) + for i in results) +def test_list_datasets_pages(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + pages = list(client.list_datasets(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_datasets_async_pager(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_datasets(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, dataset.Dataset) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_datasets_async_pages(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_datasets(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + service.UpdateDatasetRequest, + dict, +]) +def test_update_dataset(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + ) + response = client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +def test_update_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + client.update_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateDatasetRequest() + +@pytest.mark.asyncio +async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=service.UpdateDatasetRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + )) + response = await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_dataset_async_from_dict(): + await test_update_dataset_async(request_type=dict) + + +def test_update_dataset_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateDatasetRequest() + + request.dataset.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = gca_dataset.Dataset() + client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_dataset_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateDatasetRequest() + + request.dataset.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=name_value', + ) in kw['metadata'] + + +def test_update_dataset_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_dataset( + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].dataset + mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_dataset_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_dataset( + service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_dataset_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.update_dataset( + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].dataset + mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_dataset_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_dataset( + service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + service.DeleteDatasetRequest, + dict, +]) +def test_delete_dataset(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + client.delete_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteDatasetRequest() + +@pytest.mark.asyncio +async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=service.DeleteDatasetRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_dataset_async_from_dict(): + await test_delete_dataset_async(request_type=dict) + + +def test_delete_dataset_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_dataset_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_dataset_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_dataset_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_dataset( + service.DeleteDatasetRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_dataset( + service.DeleteDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ImportDataRequest, + dict, +]) +def test_import_data(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + client.import_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ImportDataRequest() + +@pytest.mark.asyncio +async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=service.ImportDataRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_data_async_from_dict(): + await test_import_data_async(request_type=dict) + + +def test_import_data_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ImportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_data_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ImportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_import_data_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_data( + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].input_config + mock_val = io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) + assert arg == mock_val + + +def test_import_data_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_data( + service.ImportDataRequest(), + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + +@pytest.mark.asyncio +async def test_import_data_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_data( + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].input_config + mock_val = io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_import_data_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_data( + service.ImportDataRequest(), + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + + +@pytest.mark.parametrize("request_type", [ + service.ExportDataRequest, + dict, +]) +def test_export_data(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + client.export_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportDataRequest() + +@pytest.mark.asyncio +async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=service.ExportDataRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_data_async_from_dict(): + await test_export_data_async(request_type=dict) + + +def test_export_data_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_data_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = service.ExportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_export_data_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_data( + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + + +def test_export_data_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_data( + service.ExportDataRequest(), + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + +@pytest.mark.asyncio +async def test_export_data_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_data( + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_data_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_data( + service.ExportDataRequest(), + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetAnnotationSpecRequest, + dict, +]) +def test_get_annotation_spec(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec( + name='name_value', + display_name='display_name_value', + example_count=1396, + ) + response = client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetAnnotationSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, annotation_spec.AnnotationSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.example_count == 1396 + + +def test_get_annotation_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + client.get_annotation_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetAnnotationSpecRequest() + +@pytest.mark.asyncio +async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=service.GetAnnotationSpecRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec( + name='name_value', + display_name='display_name_value', + example_count=1396, + )) + response = await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetAnnotationSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, annotation_spec.AnnotationSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.example_count == 1396 + + +@pytest.mark.asyncio +async def test_get_annotation_spec_async_from_dict(): + await test_get_annotation_spec_async(request_type=dict) + + +def test_get_annotation_spec_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetAnnotationSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + call.return_value = annotation_spec.AnnotationSpec() + client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_annotation_spec_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetAnnotationSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) + await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_annotation_spec_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_annotation_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_annotation_spec_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_annotation_spec( + service.GetAnnotationSpecRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_annotation_spec_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_annotation_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_annotation_spec_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_annotation_spec( + service.GetAnnotationSpecRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.CreateModelRequest, + dict, +]) +def test_create_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + client.create_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateModelRequest() + +@pytest.mark.asyncio +async def test_create_model_async(transport: str = 'grpc_asyncio', request_type=service.CreateModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_model_async_from_dict(): + await test_create_model_async(request_type=dict) + + +def test_create_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateModelRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateModelRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_model( + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model + mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) + assert arg == mock_val + + +def test_create_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model( + service.CreateModelRequest(), + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + +@pytest.mark.asyncio +async def test_create_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_model( + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model + mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_model( + service.CreateModelRequest(), + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetModelRequest, + dict, +]) +def test_get_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model( + name='name_value', + display_name='display_name_value', + dataset_id='dataset_id_value', + deployment_state=model.Model.DeploymentState.DEPLOYED, + etag='etag_value', + ) + response = client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.dataset_id == 'dataset_id_value' + assert response.deployment_state == model.Model.DeploymentState.DEPLOYED + assert response.etag == 'etag_value' + + +def test_get_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + client.get_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelRequest() + +@pytest.mark.asyncio +async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=service.GetModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model.Model( + name='name_value', + display_name='display_name_value', + dataset_id='dataset_id_value', + deployment_state=model.Model.DeploymentState.DEPLOYED, + etag='etag_value', + )) + response = await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.dataset_id == 'dataset_id_value' + assert response.deployment_state == model.Model.DeploymentState.DEPLOYED + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_model_async_from_dict(): + await test_get_model_async(request_type=dict) + + +def test_get_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + call.return_value = model.Model() + client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + service.GetModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model( + service.GetModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListModelsRequest, + dict, +]) +def test_list_models(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + client.list_models() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelsRequest() + +@pytest.mark.asyncio +async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=service.ListModelsRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_models_async_from_dict(): + await test_list_models_async(request_type=dict) + + +def test_list_models_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + call.return_value = service.ListModelsResponse() + client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_models_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse()) + await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_models_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_models( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_models_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_models( + service.ListModelsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_models_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_models( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_models_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_models( + service.ListModelsRequest(), + parent='parent_value', + ) + + +def test_list_models_pager(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + service.ListModelsResponse( + model=[], + next_page_token='def', + ), + service.ListModelsResponse( + model=[ + model.Model(), + ], + next_page_token='ghi', + ), + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_models(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) + for i in results) +def test_list_models_pages(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + service.ListModelsResponse( + model=[], + next_page_token='def', + ), + service.ListModelsResponse( + model=[ + model.Model(), + ], + next_page_token='ghi', + ), + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = list(client.list_models(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_models_async_pager(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + service.ListModelsResponse( + model=[], + next_page_token='def', + ), + service.ListModelsResponse( + model=[ + model.Model(), + ], + next_page_token='ghi', + ), + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_models(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model.Model) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_models_async_pages(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + service.ListModelsResponse( + model=[], + next_page_token='def', + ), + service.ListModelsResponse( + model=[ + model.Model(), + ], + next_page_token='ghi', + ), + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_models(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + service.DeleteModelRequest, + dict, +]) +def test_delete_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + client.delete_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteModelRequest() + +@pytest.mark.asyncio +async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=service.DeleteModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_async_from_dict(): + await test_delete_model_async(request_type=dict) + + +def test_delete_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model( + service.DeleteModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_model( + service.DeleteModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.UpdateModelRequest, + dict, +]) +def test_update_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model( + name='name_value', + display_name='display_name_value', + dataset_id='dataset_id_value', + deployment_state=gca_model.Model.DeploymentState.DEPLOYED, + etag='etag_value', + ) + response = client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.dataset_id == 'dataset_id_value' + assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED + assert response.etag == 'etag_value' + + +def test_update_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + client.update_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateModelRequest() + +@pytest.mark.asyncio +async def test_update_model_async(transport: str = 'grpc_asyncio', request_type=service.UpdateModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model( + name='name_value', + display_name='display_name_value', + dataset_id='dataset_id_value', + deployment_state=gca_model.Model.DeploymentState.DEPLOYED, + etag='etag_value', + )) + response = await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.dataset_id == 'dataset_id_value' + assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_model_async_from_dict(): + await test_update_model_async(request_type=dict) + + +def test_update_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateModelRequest() + + request.model.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + call.return_value = gca_model.Model() + client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateModelRequest() + + request.model.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model.name=name_value', + ) in kw['metadata'] + + +def test_update_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_model( + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model( + service.UpdateModelRequest(), + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_model( + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_model( + service.UpdateModelRequest(), + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + service.DeployModelRequest, + dict, +]) +def test_deploy_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_deploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + client.deploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeployModelRequest() + +@pytest.mark.asyncio +async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=service.DeployModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_deploy_model_async_from_dict(): + await test_deploy_model_async(request_type=dict) + + +def test_deploy_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeployModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_deploy_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeployModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_deploy_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.deploy_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_deploy_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.deploy_model( + service.DeployModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_deploy_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.deploy_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_deploy_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.deploy_model( + service.DeployModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.UndeployModelRequest, + dict, +]) +def test_undeploy_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + client.undeploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.UndeployModelRequest() + +@pytest.mark.asyncio +async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=service.UndeployModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undeploy_model_async_from_dict(): + await test_undeploy_model_async(request_type=dict) + + +def test_undeploy_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UndeployModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_undeploy_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UndeployModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_undeploy_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_undeploy_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undeploy_model( + service.UndeployModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.undeploy_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.undeploy_model( + service.UndeployModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ExportModelRequest, + dict, +]) +def test_export_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + client.export_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportModelRequest() + +@pytest.mark.asyncio +async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=service.ExportModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_model_async_from_dict(): + await test_export_model_async(request_type=dict) + + +def test_export_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_export_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_model( + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + + +def test_export_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_model( + service.ExportModelRequest(), + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + +@pytest.mark.asyncio +async def test_export_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_model( + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.export_model( + service.ExportModelRequest(), + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetModelEvaluationRequest, + dict, +]) +def test_get_model_evaluation(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation( + name='name_value', + annotation_spec_id='annotation_spec_id_value', + display_name='display_name_value', + evaluated_example_count=2446, + ) + response = client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.annotation_spec_id == 'annotation_spec_id_value' + assert response.display_name == 'display_name_value' + assert response.evaluated_example_count == 2446 + + +def test_get_model_evaluation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + client.get_model_evaluation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelEvaluationRequest() + +@pytest.mark.asyncio +async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=service.GetModelEvaluationRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation( + name='name_value', + annotation_spec_id='annotation_spec_id_value', + display_name='display_name_value', + evaluated_example_count=2446, + )) + response = await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelEvaluationRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.annotation_spec_id == 'annotation_spec_id_value' + assert response.display_name == 'display_name_value' + assert response.evaluated_example_count == 2446 + + +@pytest.mark.asyncio +async def test_get_model_evaluation_async_from_dict(): + await test_get_model_evaluation_async(request_type=dict) + + +def test_get_model_evaluation_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelEvaluationRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + call.return_value = model_evaluation.ModelEvaluation() + client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_evaluation_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelEvaluationRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) + await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_evaluation_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_evaluation( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_evaluation_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.get_model_evaluation(
+            service.GetModelEvaluationRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_flattened_async():
+    client = AutoMlAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_model_evaluation(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_flattened_error_async():
+    client = AutoMlAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_model_evaluation(
+            service.GetModelEvaluationRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  service.ListModelEvaluationsRequest,
+  dict,
+])
+def test_list_model_evaluations(request_type, transport: str = 'grpc'):
+    client = AutoMlClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = service.ListModelEvaluationsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_model_evaluations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == service.ListModelEvaluationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelEvaluationsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_model_evaluations_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = AutoMlClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + client.list_model_evaluations() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelEvaluationsRequest() + +@pytest.mark.asyncio +async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=service.ListModelEvaluationsRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelEvaluationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_from_dict(): + await test_list_model_evaluations_async(request_type=dict) + + +def test_list_model_evaluations_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelEvaluationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + call.return_value = service.ListModelEvaluationsResponse() + client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_evaluations_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelEvaluationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse()) + await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_list_model_evaluations_flattened():
+    client = AutoMlClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = service.ListModelEvaluationsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_model_evaluations(
+            parent='parent_value',
+            filter='filter_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].filter
+        mock_val = 'filter_value'
+        assert arg == mock_val
+
+
+def test_list_model_evaluations_flattened_error():
+    client = AutoMlClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_model_evaluations(
+            service.ListModelEvaluationsRequest(),
+            parent='parent_value',
+            filter='filter_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_flattened_async():
+    client = AutoMlAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_model_evaluations(
+            parent='parent_value',
+            filter='filter_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].filter
+        mock_val = 'filter_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_flattened_error_async():
+    client = AutoMlAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_model_evaluations(
+            service.ListModelEvaluationsRequest(),
+            parent='parent_value',
+            filter='filter_value',
+        )
+
+
+def test_list_model_evaluations_pager(transport_name: str = "grpc"):
+    client = AutoMlClient(
+        credentials=ga_credentials.AnonymousCredentials,
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Set the response to a series of pages.
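+        # Each response in side_effect below is consumed by one page fetch: three
+        # pages carrying a next_page_token ('abc', 'def', 'ghi') followed by a final
+        # page without one. The trailing RuntimeError is a sentinel that would only
+        # be raised if the pager requested more pages than were provided.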
+        call.side_effect = (
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='abc',
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[],
+                next_page_token='def',
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='ghi',
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_model_evaluations(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, model_evaluation.ModelEvaluation)
+                   for i in results)
+
+
+def test_list_model_evaluations_pages(transport_name: str = "grpc"):
+    client = AutoMlClient(
+        credentials=ga_credentials.AnonymousCredentials,
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='abc',
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[],
+                next_page_token='def',
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='ghi',
+            ),
+            service.ListModelEvaluationsResponse(
+                model_evaluation=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_model_evaluations(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async_pager():
+    client = AutoMlAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[], + next_page_token='def', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_evaluations(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_pages(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[], + next_page_token='def', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_model_evaluations(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + service.CreateDatasetRequest, + dict, +]) +def test_create_dataset_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request_init["dataset"] = {'translation_dataset_metadata': {'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_dataset_metadata': {'classification_type': 1}, 'text_classification_dataset_metadata': {'classification_type': 1}, 'image_object_detection_dataset_metadata': {}, 'text_extraction_dataset_metadata': {}, 'text_sentiment_dataset_metadata': {'sentiment_max': 1404}, 'name': 'name_value', 'display_name': 'display_name_value', 'description': 'description_value', 'example_count': 1396, 'create_time': {'seconds': 751, 'nanos': 543}, 'etag': 'etag_value', 'labels': {}} + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.CreateDatasetRequest.meta.fields["dataset"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["dataset"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["dataset"][field])): + del request_init["dataset"][field][i][subfield] + else: + del request_init["dataset"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_dataset(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_create_dataset_rest_required_fields(request_type=service.CreateDatasetRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
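+            # For reference, an unmocked path_template.transcode() returns a dict of
+            # this same shape derived from the method's http rule (for CreateDataset,
+            # roughly {'uri': '/v1/projects/<project>/locations/<location>/datasets',
+            # 'method': 'post', 'body': <dataset>, 'query_params': ...}); the stubbed
+            # values below are placeholders only.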
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.create_dataset(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_create_dataset_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.create_dataset._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("parent", "dataset", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_dataset_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_create_dataset") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_create_dataset") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.CreateDatasetRequest.pb(service.CreateDatasetRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.CreateDatasetRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_dataset_rest_bad_request(transport: str = 'rest', request_type=service.CreateDatasetRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_dataset(request) + + +def test_create_dataset_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_dataset(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{parent=projects/*/locations/*}/datasets" % client.transport._host, args[1]) + + +def test_create_dataset_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_dataset( + service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + +def test_create_dataset_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetDatasetRequest, + dict, +]) +def test_get_dataset_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_dataset(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +def test_get_dataset_rest_required_fields(request_type=service.GetDatasetRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = dataset.Dataset() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_dataset(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_dataset_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_dataset._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_dataset_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_get_dataset") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_dataset") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetDatasetRequest.pb(service.GetDatasetRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = dataset.Dataset.to_json(dataset.Dataset()) + + request = service.GetDatasetRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = dataset.Dataset() + + client.get_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_dataset_rest_bad_request(transport: str = 'rest', request_type=service.GetDatasetRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_dataset(request) + + +def test_get_dataset_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = dataset.Dataset() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_dataset(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) + + +def test_get_dataset_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_dataset( + service.GetDatasetRequest(), + name='name_value', + ) + + +def test_get_dataset_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListDatasetsRequest, + dict, +]) +def test_list_datasets_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListDatasetsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListDatasetsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_datasets(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListDatasetsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_datasets_rest_required_fields(request_type=service.ListDatasetsRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_datasets._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_datasets._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListDatasetsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListDatasetsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_datasets(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_list_datasets_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.list_datasets._get_unset_required_fields({}) + assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_datasets_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_list_datasets") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_datasets") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListDatasetsRequest.pb(service.ListDatasetsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListDatasetsResponse.to_json(service.ListDatasetsResponse()) + + request = service.ListDatasetsRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListDatasetsResponse() + + client.list_datasets(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_datasets_rest_bad_request(transport: str = 'rest', request_type=service.ListDatasetsRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_datasets(request) + + +def test_list_datasets_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListDatasetsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListDatasetsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_datasets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{parent=projects/*/locations/*}/datasets" % client.transport._host, args[1]) + + +def test_list_datasets_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_datasets( + service.ListDatasetsRequest(), + parent='parent_value', + ) + + +def test_list_datasets_rest_pager(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListDatasetsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + pager = client.list_datasets(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, dataset.Dataset) + for i in results) + + pages = list(client.list_datasets(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + service.UpdateDatasetRequest, + dict, +]) +def test_update_dataset_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} + request_init["dataset"] = {'translation_dataset_metadata': {'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_dataset_metadata': {'classification_type': 1}, 'text_classification_dataset_metadata': {'classification_type': 1}, 'image_object_detection_dataset_metadata': {}, 'text_extraction_dataset_metadata': {}, 'text_sentiment_dataset_metadata': {'sentiment_max': 1404}, 'name': 'projects/sample1/locations/sample2/datasets/sample3', 'display_name': 'display_name_value', 'description': 'description_value', 'example_count': 1396, 'create_time': {'seconds': 751, 'nanos': 543}, 'etag': 'etag_value', 'labels': {}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.UpdateDatasetRequest.meta.fields["dataset"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
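+        # Note: the check below treats a field type as proto-plus when it lacks a
+        # DESCRIPTOR attribute (proto-plus wrappers expose fields via `.meta.fields`,
+        # while vanilla protobuf classes expose them via `.DESCRIPTOR.fields`).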
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["dataset"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["dataset"][field])): + del request_init["dataset"][field][i][subfield] + else: + del request_init["dataset"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_dataset(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +def test_update_dataset_rest_required_fields(request_type=service.UpdateDatasetRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_dataset._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gca_dataset.Dataset() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "patch", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gca_dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.update_dataset(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_update_dataset_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.update_dataset._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask", )) & set(("dataset", "updateMask", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_dataset_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_update_dataset") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_update_dataset") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.UpdateDatasetRequest.pb(service.UpdateDatasetRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gca_dataset.Dataset.to_json(gca_dataset.Dataset()) + + request = service.UpdateDatasetRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gca_dataset.Dataset() + + client.update_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_dataset_rest_bad_request(transport: str = 'rest', request_type=service.UpdateDatasetRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
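+    # A 400 status on the mocked Session.request is expected to be surfaced by the
+    # REST transport as core_exceptions.BadRequest, which pytest.raises checks below.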
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_dataset(request) + + +def test_update_dataset_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_dataset.Dataset() + + # get arguments that satisfy an http rule for this method + sample_request = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} + + # get truthy value for each flattened field + mock_args = dict( + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_dataset(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{dataset.name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) + + +def test_update_dataset_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_dataset( + service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_update_dataset_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.DeleteDatasetRequest, + dict, +]) +def test_delete_dataset_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_dataset(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_delete_dataset_rest_required_fields(request_type=service.DeleteDatasetRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "delete", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.delete_dataset(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_delete_dataset_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.delete_dataset._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_dataset_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_delete_dataset") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_delete_dataset") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.DeleteDatasetRequest.pb(service.DeleteDatasetRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.DeleteDatasetRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_dataset_rest_bad_request(transport: str = 'rest', request_type=service.DeleteDatasetRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_dataset(request) + + +def test_delete_dataset_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_dataset(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) + + +def test_delete_dataset_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_dataset( + service.DeleteDatasetRequest(), + name='name_value', + ) + + +def test_delete_dataset_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ImportDataRequest, + dict, +]) +def test_import_data_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.import_data(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_import_data_rest_required_fields(request_type=service.ImportDataRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).import_data._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).import_data._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.import_data(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_import_data_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.import_data._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "inputConfig", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_import_data_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_import_data") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_import_data") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ImportDataRequest.pb(service.ImportDataRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.ImportDataRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.import_data(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_import_data_rest_bad_request(transport: str = 'rest', request_type=service.ImportDataRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.import_data(request) + + +def test_import_data_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.import_data(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/datasets/*}:importData" % client.transport._host, args[1]) + + +def test_import_data_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_data( + service.ImportDataRequest(), + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + + +def test_import_data_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ExportDataRequest, + dict, +]) +def test_export_data_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.export_data(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_export_data_rest_required_fields(request_type=service.ExportDataRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_data._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_data._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.export_data(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_export_data_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.export_data._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "outputConfig", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_export_data_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_export_data") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_export_data") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ExportDataRequest.pb(service.ExportDataRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.ExportDataRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.export_data(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_export_data_rest_bad_request(transport: str = 'rest', request_type=service.ExportDataRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.export_data(request) + + +def test_export_data_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.export_data(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/datasets/*}:exportData" % client.transport._host, args[1]) + + +def test_export_data_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_data( + service.ExportDataRequest(), + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + +def test_export_data_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetAnnotationSpecRequest, + dict, +]) +def test_get_annotation_spec_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = annotation_spec.AnnotationSpec( + name='name_value', + display_name='display_name_value', + example_count=1396, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = annotation_spec.AnnotationSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_annotation_spec(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, annotation_spec.AnnotationSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.example_count == 1396 + + +def test_get_annotation_spec_rest_required_fields(request_type=service.GetAnnotationSpecRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_annotation_spec._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_annotation_spec._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = annotation_spec.AnnotationSpec() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = annotation_spec.AnnotationSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_annotation_spec(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_annotation_spec_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_annotation_spec._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_annotation_spec_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_get_annotation_spec") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_annotation_spec") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetAnnotationSpecRequest.pb(service.GetAnnotationSpecRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = annotation_spec.AnnotationSpec.to_json(annotation_spec.AnnotationSpec()) + + request = service.GetAnnotationSpecRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = annotation_spec.AnnotationSpec() + + client.get_annotation_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_annotation_spec_rest_bad_request(transport: str = 'rest', request_type=service.GetAnnotationSpecRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_annotation_spec(request) + + +def test_get_annotation_spec_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = annotation_spec.AnnotationSpec() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = annotation_spec.AnnotationSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_annotation_spec(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}" % client.transport._host, args[1]) + + +def test_get_annotation_spec_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_annotation_spec( + service.GetAnnotationSpecRequest(), + name='name_value', + ) + + +def test_get_annotation_spec_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.CreateModelRequest, + dict, +]) +def test_create_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request_init["model"] = {'translation_model_metadata': {'base_model': 'base_model_value', 'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_model_metadata': {'base_model_id': 'base_model_id_value', 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881, 'stop_reason': 'stop_reason_value', 'model_type': 'model_type_value', 'node_qps': 0.857, 'node_count': 1070}, 'text_classification_model_metadata': {'classification_type': 1}, 'image_object_detection_model_metadata': {'model_type': 'model_type_value', 'node_count': 1070, 'node_qps': 0.857, 'stop_reason': 'stop_reason_value', 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881}, 'text_extraction_model_metadata': {}, 'text_sentiment_model_metadata': {}, 'name': 'name_value', 'display_name': 'display_name_value', 'dataset_id': 'dataset_id_value', 'create_time': {'seconds': 751, 'nanos': 543}, 'update_time': {}, 'deployment_state': 1, 'etag': 'etag_value', 'labels': {}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.CreateModelRequest.meta.fields["model"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["model"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["model"][field])): + del request_init["model"][field][i][subfield] + else: + del request_init["model"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_model(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_create_model_rest_required_fields(request_type=service.CreateModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.create_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_create_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.create_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("parent", "model", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_create_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_create_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.CreateModelRequest.pb(service.CreateModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.CreateModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_model_rest_bad_request(transport: str = 'rest', request_type=service.CreateModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_model(request) + + +def test_create_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{parent=projects/*/locations/*}/models" % client.transport._host, args[1]) + + +def test_create_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model( + service.CreateModelRequest(), + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + + +def test_create_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetModelRequest, + dict, +]) +def test_get_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = model.Model( + name='name_value', + display_name='display_name_value', + dataset_id='dataset_id_value', + deployment_state=model.Model.DeploymentState.DEPLOYED, + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_model(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.dataset_id == 'dataset_id_value' + assert response.deployment_state == model.Model.DeploymentState.DEPLOYED + assert response.etag == 'etag_value' + + +def test_get_model_rest_required_fields(request_type=service.GetModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model.Model() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_get_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetModelRequest.pb(service.GetModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model.Model.to_json(model.Model()) + + request = service.GetModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model.Model() + + client.get_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_model_rest_bad_request(transport: str = 'rest', request_type=service.GetModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_model(request) + + +def test_get_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = model.Model() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}" % client.transport._host, args[1]) + + +def test_get_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + service.GetModelRequest(), + name='name_value', + ) + + +def test_get_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListModelsRequest, + dict, +]) +def test_list_models_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListModelsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_models(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_models_rest_required_fields(request_type=service.ListModelsRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_models._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_models._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListModelsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_models(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_list_models_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.list_models._get_unset_required_fields({}) + assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_models_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_list_models") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_models") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListModelsRequest.pb(service.ListModelsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListModelsResponse.to_json(service.ListModelsResponse()) + + request = service.ListModelsRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListModelsResponse() + + client.list_models(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_models_rest_bad_request(transport: str = 'rest', request_type=service.ListModelsRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_models(request) + + +def test_list_models_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListModelsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_models(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{parent=projects/*/locations/*}/models" % client.transport._host, args[1]) + + +def test_list_models_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_models( + service.ListModelsRequest(), + parent='parent_value', + ) + + +def test_list_models_rest_pager(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + service.ListModelsResponse( + model=[], + next_page_token='def', + ), + service.ListModelsResponse( + model=[ + model.Model(), + ], + next_page_token='ghi', + ), + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListModelsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + pager = client.list_models(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) + for i in results) + + pages = list(client.list_models(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + service.DeleteModelRequest, + dict, +]) +def test_delete_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_model(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_delete_model_rest_required_fields(request_type=service.DeleteModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "delete", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.delete_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_delete_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.delete_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_delete_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_delete_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.DeleteModelRequest.pb(service.DeleteModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.DeleteModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_model_rest_bad_request(transport: str = 'rest', request_type=service.DeleteModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_model(request) + + +def test_delete_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}" % client.transport._host, args[1]) + + +def test_delete_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model( + service.DeleteModelRequest(), + name='name_value', + ) + + +def test_delete_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.UpdateModelRequest, + dict, +]) +def test_update_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'model': {'name': 'projects/sample1/locations/sample2/models/sample3'}} + request_init["model"] = {'translation_model_metadata': {'base_model': 'base_model_value', 'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_model_metadata': {'base_model_id': 'base_model_id_value', 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881, 'stop_reason': 'stop_reason_value', 'model_type': 'model_type_value', 'node_qps': 0.857, 'node_count': 1070}, 'text_classification_model_metadata': {'classification_type': 1}, 'image_object_detection_model_metadata': {'model_type': 'model_type_value', 'node_count': 1070, 'node_qps': 0.857, 'stop_reason': 'stop_reason_value', 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881}, 'text_extraction_model_metadata': {}, 'text_sentiment_model_metadata': {}, 'name': 'projects/sample1/locations/sample2/models/sample3', 'display_name': 'display_name_value', 'dataset_id': 'dataset_id_value', 'create_time': {'seconds': 751, 'nanos': 543}, 'update_time': {}, 'deployment_state': 1, 'etag': 'etag_value', 'labels': {}} + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.UpdateModelRequest.meta.fields["model"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["model"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["model"][field])): + del request_init["model"][field][i][subfield] + else: + del request_init["model"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_model.Model( + name='name_value', + display_name='display_name_value', + dataset_id='dataset_id_value', + deployment_state=gca_model.Model.DeploymentState.DEPLOYED, + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_model(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.dataset_id == 'dataset_id_value' + assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED + assert response.etag == 'etag_value' + + +def test_update_model_rest_required_fields(request_type=service.UpdateModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_model._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gca_model.Model() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "patch", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gca_model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.update_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_update_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.update_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask", )) & set(("model", "updateMask", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_update_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_update_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.UpdateModelRequest.pb(service.UpdateModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gca_model.Model.to_json(gca_model.Model()) + + request = service.UpdateModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gca_model.Model() + + client.update_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_model_rest_bad_request(transport: str = 'rest', request_type=service.UpdateModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'model': {'name': 'projects/sample1/locations/sample2/models/sample3'}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_model(request) + + +def test_update_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_model.Model() + + # get arguments that satisfy an http rule for this method + sample_request = {'model': {'name': 'projects/sample1/locations/sample2/models/sample3'}} + + # get truthy value for each flattened field + mock_args = dict( + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{model.name=projects/*/locations/*/models/*}" % client.transport._host, args[1]) + + +def test_update_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model( + service.UpdateModelRequest(), + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_update_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.DeployModelRequest, + dict, +]) +def test_deploy_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.deploy_model(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_deploy_model_rest_required_fields(request_type=service.DeployModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).deploy_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).deploy_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.deploy_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_deploy_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.deploy_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_deploy_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_deploy_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_deploy_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.DeployModelRequest.pb(service.DeployModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.DeployModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.deploy_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_deploy_model_rest_bad_request(transport: str = 'rest', request_type=service.DeployModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.deploy_model(request) + + +def test_deploy_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.deploy_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}:deploy" % client.transport._host, args[1]) + + +def test_deploy_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.deploy_model( + service.DeployModelRequest(), + name='name_value', + ) + + +def test_deploy_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.UndeployModelRequest, + dict, +]) +def test_undeploy_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.undeploy_model(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_undeploy_model_rest_required_fields(request_type=service.UndeployModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).undeploy_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).undeploy_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.undeploy_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_undeploy_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.undeploy_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_undeploy_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_undeploy_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_undeploy_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.UndeployModelRequest.pb(service.UndeployModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.UndeployModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.undeploy_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_undeploy_model_rest_bad_request(transport: str = 'rest', request_type=service.UndeployModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.undeploy_model(request) + + +def test_undeploy_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.undeploy_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}:undeploy" % client.transport._host, args[1]) + + +def test_undeploy_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undeploy_model( + service.UndeployModelRequest(), + name='name_value', + ) + + +def test_undeploy_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ExportModelRequest, + dict, +]) +def test_export_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.export_model(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_export_model_rest_required_fields(request_type=service.ExportModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.export_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_export_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.export_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "outputConfig", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_export_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_export_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_export_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ExportModelRequest.pb(service.ExportModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.ExportModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.export_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_export_model_rest_bad_request(transport: str = 'rest', request_type=service.ExportModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.export_model(request) + + +def test_export_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.export_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}:export" % client.transport._host, args[1]) + + +def test_export_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_model( + service.ExportModelRequest(), + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + +def test_export_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetModelEvaluationRequest, + dict, +]) +def test_get_model_evaluation_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = model_evaluation.ModelEvaluation( + name='name_value', + annotation_spec_id='annotation_spec_id_value', + display_name='display_name_value', + evaluated_example_count=2446, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_evaluation.ModelEvaluation.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_model_evaluation(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.annotation_spec_id == 'annotation_spec_id_value' + assert response.display_name == 'display_name_value' + assert response.evaluated_example_count == 2446 + + +def test_get_model_evaluation_rest_required_fields(request_type=service.GetModelEvaluationRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model_evaluation._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model_evaluation._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model_evaluation.ModelEvaluation() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = model_evaluation.ModelEvaluation.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_model_evaluation(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_model_evaluation_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_model_evaluation._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_model_evaluation_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_get_model_evaluation") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_model_evaluation") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetModelEvaluationRequest.pb(service.GetModelEvaluationRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model_evaluation.ModelEvaluation.to_json(model_evaluation.ModelEvaluation()) + + request = service.GetModelEvaluationRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model_evaluation.ModelEvaluation() + + client.get_model_evaluation(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_model_evaluation_rest_bad_request(transport: str = 'rest', request_type=service.GetModelEvaluationRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
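+ # (A 400 status on the mocked session is expected to surface as core_exceptions.BadRequest.)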
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_model_evaluation(request) + + +def test_get_model_evaluation_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = model_evaluation.ModelEvaluation() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_evaluation.ModelEvaluation.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_model_evaluation(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}" % client.transport._host, args[1]) + + +def test_get_model_evaluation_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_evaluation( + service.GetModelEvaluationRequest(), + name='name_value', + ) + + +def test_get_model_evaluation_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListModelEvaluationsRequest, + dict, +]) +def test_list_model_evaluations_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
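+ # (Only next_page_token is set here; the client is expected to wrap the decoded
+ # response in a ListModelEvaluationsPager, as asserted below.)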
+ return_value = service.ListModelEvaluationsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListModelEvaluationsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_model_evaluations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_evaluations_rest_required_fields(request_type=service.ListModelEvaluationsRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["filter"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + assert "filter" not in jsonified_request + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_model_evaluations._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "filter" in jsonified_request + assert jsonified_request["filter"] == request_init["filter"] + + jsonified_request["parent"] = 'parent_value' + jsonified_request["filter"] = 'filter_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_model_evaluations._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "filter" in jsonified_request + assert jsonified_request["filter"] == 'filter_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListModelEvaluationsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
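+ # (With transcode patched, required query fields such as "filter" should show up in the
+ # params asserted below even when left at their default empty value.)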
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListModelEvaluationsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_model_evaluations(request) + + expected_params = [ + ( + "filter", + "", + ), + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_list_model_evaluations_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.list_model_evaluations._get_unset_required_fields({}) + assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", "filter", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_model_evaluations_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_list_model_evaluations") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_model_evaluations") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListModelEvaluationsRequest.pb(service.ListModelEvaluationsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListModelEvaluationsResponse.to_json(service.ListModelEvaluationsResponse()) + + request = service.ListModelEvaluationsRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListModelEvaluationsResponse() + + client.list_model_evaluations(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_model_evaluations_rest_bad_request(transport: str = 'rest', request_type=service.ListModelEvaluationsRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_model_evaluations(request) + + +def test_list_model_evaluations_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListModelEvaluationsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + filter='filter_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListModelEvaluationsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_model_evaluations(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations" % client.transport._host, args[1]) + + +def test_list_model_evaluations_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_evaluations( + service.ListModelEvaluationsRequest(), + parent='parent_value', + filter='filter_value', + ) + + +def test_list_model_evaluations_rest_pager(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[], + next_page_token='def', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListModelEvaluationsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/locations/sample2/models/sample3'} + + pager = client.list_model_evaluations(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) + for i in results) + + pages = list(client.list_model_evaluations(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoMlClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = AutoMlClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = AutoMlClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoMlClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
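+ # (client.transport should be the exact transport instance passed in, not a copy.)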
+ transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = AutoMlClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.AutoMlGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.AutoMlGrpcTransport, + transports.AutoMlGrpcAsyncIOTransport, + transports.AutoMlRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "rest", +]) +def test_transport_kind(transport_name): + transport = AutoMlClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.AutoMlGrpcTransport, + ) + +def test_auto_ml_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.AutoMlTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_auto_ml_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.AutoMlTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
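+ # (Each RPC listed below is only a stub on the base transport; concrete transports override it.)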
+ methods = ( + 'create_dataset', + 'get_dataset', + 'list_datasets', + 'update_dataset', + 'delete_dataset', + 'import_data', + 'export_data', + 'get_annotation_spec', + 'create_model', + 'get_model', + 'list_models', + 'delete_model', + 'update_model', + 'deploy_model', + 'undeploy_model', + 'export_model', + 'get_model_evaluation', + 'list_model_evaluations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_auto_ml_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoMlTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_auto_ml_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoMlTransport() + adc.assert_called_once() + + +def test_auto_ml_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + AutoMlClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AutoMlGrpcTransport, + transports.AutoMlGrpcAsyncIOTransport, + ], +) +def test_auto_ml_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
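+ # (google.auth.default is patched so no real ADC lookup happens during the test.)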
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AutoMlGrpcTransport, + transports.AutoMlGrpcAsyncIOTransport, + transports.AutoMlRestTransport, + ], +) +def test_auto_ml_transport_auth_gdch_credentials(transport_class): + host = 'https://language.com' + api_audience_tests = [None, 'https://language2.com'] + api_audience_expect = [host, 'https://language2.com'] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with( + e + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.AutoMlGrpcTransport, grpc_helpers), + (transports.AutoMlGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_auto_ml_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "automl.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="automl.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) +def test_auto_ml_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
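+ # (grpc.ssl_channel_credentials should be constructed from the cert/key pair returned by the callback.)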
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + +def test_auto_ml_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.AutoMlRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_auto_ml_rest_lro_client(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", + "rest", +]) +def test_auto_ml_host_no_port(transport_name): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else 'https://automl.googleapis.com' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", + "rest", +]) +def test_auto_ml_host_with_port(transport_name): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'automl.googleapis.com:8000' + if transport_name in ['grpc', 'grpc_asyncio'] + else 'https://automl.googleapis.com:8000' + ) + +@pytest.mark.parametrize("transport_name", [ + "rest", +]) +def test_auto_ml_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = AutoMlClient( + credentials=creds1, + transport=transport_name, + ) + client2 = AutoMlClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_dataset._session + session2 = client2.transport.create_dataset._session + assert session1 != session2 + session1 = client1.transport.get_dataset._session + session2 = client2.transport.get_dataset._session + assert session1 != session2 + session1 = client1.transport.list_datasets._session + session2 = client2.transport.list_datasets._session + assert session1 != session2 + session1 = client1.transport.update_dataset._session + session2 = client2.transport.update_dataset._session + assert session1 != session2 + session1 = client1.transport.delete_dataset._session + session2 = client2.transport.delete_dataset._session + assert session1 != session2 + session1 = client1.transport.import_data._session + session2 = client2.transport.import_data._session + assert session1 != session2 + 
session1 = client1.transport.export_data._session
+ session2 = client2.transport.export_data._session
+ assert session1 != session2
+ session1 = client1.transport.get_annotation_spec._session
+ session2 = client2.transport.get_annotation_spec._session
+ assert session1 != session2
+ session1 = client1.transport.create_model._session
+ session2 = client2.transport.create_model._session
+ assert session1 != session2
+ session1 = client1.transport.get_model._session
+ session2 = client2.transport.get_model._session
+ assert session1 != session2
+ session1 = client1.transport.list_models._session
+ session2 = client2.transport.list_models._session
+ assert session1 != session2
+ session1 = client1.transport.delete_model._session
+ session2 = client2.transport.delete_model._session
+ assert session1 != session2
+ session1 = client1.transport.update_model._session
+ session2 = client2.transport.update_model._session
+ assert session1 != session2
+ session1 = client1.transport.deploy_model._session
+ session2 = client2.transport.deploy_model._session
+ assert session1 != session2
+ session1 = client1.transport.undeploy_model._session
+ session2 = client2.transport.undeploy_model._session
+ assert session1 != session2
+ session1 = client1.transport.export_model._session
+ session2 = client2.transport.export_model._session
+ assert session1 != session2
+ session1 = client1.transport.get_model_evaluation._session
+ session2 = client2.transport.get_model_evaluation._session
+ assert session1 != session2
+ session1 = client1.transport.list_model_evaluations._session
+ session2 = client2.transport.list_model_evaluations._session
+ assert session1 != session2
+
+
+def test_auto_ml_grpc_transport_channel():
+ channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.AutoMlGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+def test_auto_ml_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.AutoMlGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) +def test_auto_ml_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) +def test_auto_ml_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_auto_ml_grpc_lro_client(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_auto_ml_grpc_lro_async_client(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_annotation_spec_path(): + project = "squid" + location = "clam" + dataset = "whelk" + annotation_spec = "octopus" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) + actual = AutoMlClient.annotation_spec_path(project, location, dataset, annotation_spec) + assert expected == actual + + +def test_parse_annotation_spec_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "dataset": "cuttlefish", + "annotation_spec": "mussel", + } + path = AutoMlClient.annotation_spec_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_annotation_spec_path(path) + assert expected == actual + +def test_dataset_path(): + project = "winkle" + location = "nautilus" + dataset = "scallop" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = AutoMlClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "abalone", + "location": "squid", + "dataset": "clam", + } + path = AutoMlClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_dataset_path(path) + assert expected == actual + +def test_model_path(): + project = "whelk" + location = "octopus" + model = "oyster" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = AutoMlClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "model": "mussel", + } + path = AutoMlClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_model_path(path) + assert expected == actual + +def test_model_evaluation_path(): + project = "winkle" + location = "nautilus" + model = "scallop" + model_evaluation = "abalone" + expected = "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(project=project, location=location, model=model, model_evaluation=model_evaluation, ) + actual = AutoMlClient.model_evaluation_path(project, location, model, model_evaluation) + assert expected == actual + + +def test_parse_model_evaluation_path(): + expected = { + "project": "squid", + "location": "clam", + "model": "whelk", + "model_evaluation": "octopus", + } + path = AutoMlClient.model_evaluation_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AutoMlClient.parse_model_evaluation_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "oyster" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = AutoMlClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = AutoMlClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "cuttlefish" + expected = "folders/{folder}".format(folder=folder, ) + actual = AutoMlClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = AutoMlClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "winkle" + expected = "organizations/{organization}".format(organization=organization, ) + actual = AutoMlClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = AutoMlClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "scallop" + expected = "projects/{project}".format(project=project, ) + actual = AutoMlClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = AutoMlClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "squid" + location = "clam" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = AutoMlClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = AutoMlClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AutoMlClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.AutoMlTransport, '_prep_wrapped_messages') as prep: + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.AutoMlTransport, '_prep_wrapped_messages') as prep: + transport_class = AutoMlClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + 'grpc', + ] + for transport in transports: + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (AutoMlClient, transports.AutoMlGrpcTransport), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_prediction_service.py b/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_prediction_service.py new file mode 100644 index 00000000..b17e3639 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_prediction_service.py @@ -0,0 +1,2269 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.automl_v1.services.prediction_service import PredictionServiceAsyncClient +from google.cloud.automl_v1.services.prediction_service import PredictionServiceClient +from google.cloud.automl_v1.services.prediction_service import transports +from google.cloud.automl_v1.types import annotation_payload +from google.cloud.automl_v1.types import data_items +from google.cloud.automl_v1.types import geometry +from google.cloud.automl_v1.types import io +from google.cloud.automl_v1.types import operations +from google.cloud.automl_v1.types import prediction_service +from google.cloud.automl_v1.types import text_segment +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
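+# (Only localhost-style default endpoints are rewritten; real *.googleapis.com endpoints pass through unchanged.)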
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PredictionServiceClient._get_default_mtls_endpoint(None) is None + assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), + (PredictionServiceClient, "rest"), +]) +def test_prediction_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://automl.googleapis.com' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PredictionServiceGrpcTransport, "grpc"), + (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.PredictionServiceRestTransport, "rest"), +]) +def test_prediction_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), + (PredictionServiceClient, "rest"), +]) +def test_prediction_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert 
isinstance(client, client_class) + + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://automl.googleapis.com' + ) + + +def test_prediction_service_client_get_transport_class(): + transport = PredictionServiceClient.get_transport_class() + available_transports = [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceRestTransport, + ] + assert transport in available_transports + + transport = PredictionServiceClient.get_transport_class("grpc") + assert transport == transports.PredictionServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +def test_prediction_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
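+ # (With "always", the client should target DEFAULT_MTLS_ENDPOINT even when no client certificate is available.)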
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions(api_audience="https://language.googleapis.com") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com" + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), + (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", "true"), + (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", "false"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [ + PredictionServiceClient, PredictionServiceAsyncClient +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +def test_prediction_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
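+    # An explicit api_endpoint and client_cert_source in ClientOptions should be
+    # returned unchanged when client certificates are enabled.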
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest"), +]) +def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
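+    # Scopes given via ClientOptions should be forwarded to the transport verbatim.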
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), + (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", None), +]) +def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +def test_prediction_service_client_client_options_from_dict(): + with mock.patch('google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = PredictionServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_prediction_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
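+    # load_credentials_from_file is mocked, so "credentials.json" never has to
+    # exist; the assertion below only checks that the loaded credentials (not
+    # ADC) are handed to create_channel.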
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "automl.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="automl.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.PredictRequest, + dict, +]) +def test_predict(request_type, transport: str = 'grpc'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse( + ) + response = client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + + +def test_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + client.predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + +@pytest.mark.asyncio +async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse( + )) + response = await client.predict(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + + +@pytest.mark.asyncio +async def test_predict_async_from_dict(): + await test_predict_async(request_type=dict) + + +def test_predict_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + call.return_value = prediction_service.PredictResponse() + client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) + await client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_predict_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.predict( + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].payload + mock_val = data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')) + assert arg == mock_val + arg = args[0].params + mock_val = {'key_value': 'value_value'} + assert arg == mock_val + + +def test_predict_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.predict( + prediction_service.PredictRequest(), + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + +@pytest.mark.asyncio +async def test_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.predict( + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].payload + mock_val = data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')) + assert arg == mock_val + arg = args[0].params + mock_val = {'key_value': 'value_value'} + assert arg == mock_val + +@pytest.mark.asyncio +async def test_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.predict( + prediction_service.PredictRequest(), + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.BatchPredictRequest, + dict, +]) +def test_batch_predict(request_type, transport: str = 'grpc'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.BatchPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + client.batch_predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.BatchPredictRequest() + +@pytest.mark.asyncio +async def test_batch_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.BatchPredictRequest): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.BatchPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_predict_async_from_dict(): + await test_batch_predict_async(request_type=dict) + + +def test_batch_predict_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.BatchPredictRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.BatchPredictRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_batch_predict_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_predict( + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].input_config + mock_val = io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) + assert arg == mock_val + arg = args[0].output_config + mock_val = io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + arg = args[0].params + mock_val = {'key_value': 'value_value'} + assert arg == mock_val + + +def test_batch_predict_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_predict( + prediction_service.BatchPredictRequest(), + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + +@pytest.mark.asyncio +async def test_batch_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.batch_predict( + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].input_config + mock_val = io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) + assert arg == mock_val + arg = args[0].output_config + mock_val = io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + arg = args[0].params + mock_val = {'key_value': 'value_value'} + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_predict( + prediction_service.BatchPredictRequest(), + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.PredictRequest, + dict, +]) +def test_predict_rest(request_type): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = prediction_service.PredictResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = prediction_service.PredictResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.predict(request) + + # Establish that the response is the type that we expect. 
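+    # The JSON payload faked above should have been deserialized back into a
+    # PredictResponse proto by the REST transport.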
+ assert isinstance(response, prediction_service.PredictResponse) + + +def test_predict_rest_required_fields(request_type=prediction_service.PredictRequest): + transport_class = transports.PredictionServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).predict._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).predict._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = prediction_service.PredictResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
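+            # transcode() is patched to return a fixed URI/method so only the
+            # query-parameter handling is exercised here.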
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = prediction_service.PredictResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.predict(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_predict_rest_unset_required_fields(): + transport = transports.PredictionServiceRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.predict._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "payload", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_predict_rest_interceptors(null_interceptor): + transport = transports.PredictionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.PredictionServiceRestInterceptor(), + ) + client = PredictionServiceClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.PredictionServiceRestInterceptor, "post_predict") as post, \ + mock.patch.object(transports.PredictionServiceRestInterceptor, "pre_predict") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = prediction_service.PredictRequest.pb(prediction_service.PredictRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = prediction_service.PredictResponse.to_json(prediction_service.PredictResponse()) + + request = prediction_service.PredictRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = prediction_service.PredictResponse() + + client.predict(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_predict_rest_bad_request(transport: str = 'rest', request_type=prediction_service.PredictRequest): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
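+    # A 400 status on the underlying session should surface as
+    # core_exceptions.BadRequest.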
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.predict(request) + + +def test_predict_rest_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = prediction_service.PredictResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = prediction_service.PredictResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.predict(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}:predict" % client.transport._host, args[1]) + + +def test_predict_rest_flattened_error(transport: str = 'rest'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.predict( + prediction_service.PredictRequest(), + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + + +def test_predict_rest_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.BatchPredictRequest, + dict, +]) +def test_batch_predict_rest(request_type): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
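+        # BatchPredict is a long-running method, so the REST layer returns an
+        # Operation rather than the final result message.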
+ return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.batch_predict(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_batch_predict_rest_required_fields(request_type=prediction_service.BatchPredictRequest): + transport_class = transports.PredictionServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).batch_predict._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).batch_predict._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.batch_predict(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_batch_predict_rest_unset_required_fields(): + transport = transports.PredictionServiceRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.batch_predict._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "inputConfig", "outputConfig", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_predict_rest_interceptors(null_interceptor): + transport = transports.PredictionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.PredictionServiceRestInterceptor(), + ) + client = PredictionServiceClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.PredictionServiceRestInterceptor, "post_batch_predict") as post, \ + mock.patch.object(transports.PredictionServiceRestInterceptor, "pre_batch_predict") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = prediction_service.BatchPredictRequest.pb(prediction_service.BatchPredictRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = prediction_service.BatchPredictRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.batch_predict(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_predict_rest_bad_request(transport: str = 'rest', request_type=prediction_service.BatchPredictRequest): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_predict(request) + + +def test_batch_predict_rest_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.batch_predict(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}:batchPredict" % client.transport._host, args[1]) + + +def test_batch_predict_rest_flattened_error(transport: str = 'rest'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_predict( + prediction_service.BatchPredictRequest(), + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + + +def test_batch_predict_rest_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
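+    # Supplying an API key together with a pre-built transport should raise
+    # ValueError, just like explicit credentials do above.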
+ transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PredictionServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PredictionServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + transports.PredictionServiceRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "rest", +]) +def test_transport_kind(transport_name): + transport = PredictionServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PredictionServiceGrpcTransport, + ) + +def test_prediction_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_prediction_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
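+    # The base transport only defines the interface; the gRPC and REST
+    # subclasses are expected to override these stubs.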
+ methods = ( + 'predict', + 'batch_predict', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_prediction_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_prediction_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport() + adc.assert_called_once() + + +def test_prediction_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PredictionServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + ], +) +def test_prediction_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
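+    # google.auth.default is patched, so no real ADC lookup happens; the test
+    # only verifies the scopes and quota project forwarded to it.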
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + transports.PredictionServiceRestTransport, + ], +) +def test_prediction_service_transport_auth_gdch_credentials(transport_class): + host = 'https://language.com' + api_audience_tests = [None, 'https://language2.com'] + api_audience_expect = [host, 'https://language2.com'] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with( + e + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.PredictionServiceGrpcTransport, grpc_helpers), + (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "automl.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="automl.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
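+    # In that case the transport is expected to build grpc.ssl_channel_credentials()
+    # itself from the cert/key pair returned by the callback.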
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + +def test_prediction_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.PredictionServiceRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_prediction_service_rest_lro_client(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", + "rest", +]) +def test_prediction_service_host_no_port(transport_name): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else 'https://automl.googleapis.com' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", + "rest", +]) +def test_prediction_service_host_with_port(transport_name): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'automl.googleapis.com:8000' + if transport_name in ['grpc', 'grpc_asyncio'] + else 'https://automl.googleapis.com:8000' + ) + +@pytest.mark.parametrize("transport_name", [ + "rest", +]) +def test_prediction_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = PredictionServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = PredictionServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.predict._session + session2 = client2.transport.predict._session + assert session1 != session2 + session1 = client1.transport.batch_predict._session + session2 = client2.transport.batch_predict._session + assert session1 != session2 +def test_prediction_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
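+    # A caller-supplied channel should be adopted as-is, with no ssl channel
+    # credentials recorded on the transport.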
+ transport = transports.PredictionServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_prediction_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.PredictionServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_prediction_service_grpc_lro_client(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_prediction_service_grpc_lro_async_client(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = PredictionServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = PredictionServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_model_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PredictionServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = PredictionServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = PredictionServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = PredictionServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PredictionServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = PredictionServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = PredictionServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = PredictionServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PredictionServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = PredictionServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = PredictionServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + 'grpc', + ] + for transport in transports: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/owl-bot-staging/v1beta1/.coveragerc b/owl-bot-staging/v1beta1/.coveragerc new file mode 100644 index 00000000..8705cefd --- /dev/null +++ b/owl-bot-staging/v1beta1/.coveragerc @@ -0,0 +1,13 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/automl/__init__.py + google/cloud/automl/gapic_version.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ diff --git a/owl-bot-staging/v1beta1/.flake8 b/owl-bot-staging/v1beta1/.flake8 new file mode 100644 index 00000000..29227d4c --- /dev/null +++ b/owl-bot-staging/v1beta1/.flake8 @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! +[flake8] +ignore = E203, E266, E501, W503 +exclude = + # Exclude generated code. + **/proto/** + **/gapic/** + **/services/** + **/types/** + *_pb2.py + + # Standard linting exemptions. + **/.nox/** + __pycache__, + .git, + *.pyc, + conf.py diff --git a/owl-bot-staging/v1beta1/MANIFEST.in b/owl-bot-staging/v1beta1/MANIFEST.in new file mode 100644 index 00000000..ba187221 --- /dev/null +++ b/owl-bot-staging/v1beta1/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/cloud/automl *.py +recursive-include google/cloud/automl_v1beta1 *.py diff --git a/owl-bot-staging/v1beta1/README.rst b/owl-bot-staging/v1beta1/README.rst new file mode 100644 index 00000000..d0dde648 --- /dev/null +++ b/owl-bot-staging/v1beta1/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Cloud Automl API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Cloud Automl API. +4. 
`Setup Authentication.`_

+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/v1beta1/docs/_static/custom.css b/owl-bot-staging/v1beta1/docs/_static/custom.css
new file mode 100644
index 00000000..06423be0
--- /dev/null
+++ b/owl-bot-staging/v1beta1/docs/_static/custom.css
@@ -0,0 +1,3 @@
+dl.field-list > dt {
+    min-width: 100px
+}
diff --git a/owl-bot-staging/v1beta1/docs/automl_v1beta1/auto_ml.rst b/owl-bot-staging/v1beta1/docs/automl_v1beta1/auto_ml.rst
new file mode 100644
index 00000000..ddb02f63
--- /dev/null
+++ b/owl-bot-staging/v1beta1/docs/automl_v1beta1/auto_ml.rst
@@ -0,0 +1,10 @@
+AutoMl
+------------------------
+
+.. automodule:: google.cloud.automl_v1beta1.services.auto_ml
+    :members:
+    :inherited-members:
+
+.. automodule:: google.cloud.automl_v1beta1.services.auto_ml.pagers
+    :members:
+    :inherited-members:
diff --git a/owl-bot-staging/v1beta1/docs/automl_v1beta1/prediction_service.rst b/owl-bot-staging/v1beta1/docs/automl_v1beta1/prediction_service.rst
new file mode 100644
index 00000000..e234e69f
--- /dev/null
+++ b/owl-bot-staging/v1beta1/docs/automl_v1beta1/prediction_service.rst
@@ -0,0 +1,6 @@
+PredictionService
+-----------------------------------
+
+.. automodule:: google.cloud.automl_v1beta1.services.prediction_service
+    :members:
+    :inherited-members:
diff --git a/owl-bot-staging/v1beta1/docs/automl_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/automl_v1beta1/services.rst
new file mode 100644
index 00000000..ebd9c7c8
--- /dev/null
+++ b/owl-bot-staging/v1beta1/docs/automl_v1beta1/services.rst
@@ -0,0 +1,7 @@
+Services for Google Cloud Automl v1beta1 API
+============================================
+.. toctree::
+    :maxdepth: 2
+
+    auto_ml
+    prediction_service
diff --git a/owl-bot-staging/v1beta1/docs/automl_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/automl_v1beta1/types.rst
new file mode 100644
index 00000000..b50b55f6
--- /dev/null
+++ b/owl-bot-staging/v1beta1/docs/automl_v1beta1/types.rst
@@ -0,0 +1,6 @@
+Types for Google Cloud Automl v1beta1 API
+=========================================
+
+..
automodule:: google.cloud.automl_v1beta1.types + :members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/conf.py b/owl-bot-staging/v1beta1/docs/conf.py new file mode 100644 index 00000000..708bcaa7 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# google-cloud-automl documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "4.0.1" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGLEOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The root toctree document. +root_doc = "index" + +# General information about the project. +project = u"google-cloud-automl" +copyright = u"2023, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. 
+# Usually you set "language" from the command line for these cases. +language = 'en' + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. 
+# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-automl-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + root_doc, + "google-cloud-automl.tex", + u"google-cloud-automl Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. 
+# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + root_doc, + "google-cloud-automl", + u"Google Cloud Automl Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + root_doc, + "google-cloud-automl", + u"google-cloud-automl Documentation", + author, + "google-cloud-automl", + "GAPIC library for Google Cloud Automl API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/docs/index.rst b/owl-bot-staging/v1beta1/docs/index.rst new file mode 100644 index 00000000..f7d269b1 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + automl_v1beta1/services + automl_v1beta1/types diff --git a/owl-bot-staging/v1beta1/google/cloud/automl/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl/__init__.py new file mode 100644 index 00000000..98db5cfc --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl/__init__.py @@ -0,0 +1,275 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.cloud.automl import gapic_version as package_version + +__version__ = package_version.__version__ + + +from google.cloud.automl_v1beta1.services.auto_ml.client import AutoMlClient +from google.cloud.automl_v1beta1.services.auto_ml.async_client import AutoMlAsyncClient +from google.cloud.automl_v1beta1.services.prediction_service.client import PredictionServiceClient +from google.cloud.automl_v1beta1.services.prediction_service.async_client import PredictionServiceAsyncClient + +from google.cloud.automl_v1beta1.types.annotation_payload import AnnotationPayload +from google.cloud.automl_v1beta1.types.annotation_spec import AnnotationSpec +from google.cloud.automl_v1beta1.types.classification import ClassificationAnnotation +from google.cloud.automl_v1beta1.types.classification import ClassificationEvaluationMetrics +from google.cloud.automl_v1beta1.types.classification import VideoClassificationAnnotation +from google.cloud.automl_v1beta1.types.classification import ClassificationType +from google.cloud.automl_v1beta1.types.column_spec import ColumnSpec +from google.cloud.automl_v1beta1.types.data_items import Document +from google.cloud.automl_v1beta1.types.data_items import DocumentDimensions +from google.cloud.automl_v1beta1.types.data_items import ExamplePayload +from google.cloud.automl_v1beta1.types.data_items import Image +from google.cloud.automl_v1beta1.types.data_items import Row +from google.cloud.automl_v1beta1.types.data_items import TextSnippet +from google.cloud.automl_v1beta1.types.data_stats import ArrayStats +from google.cloud.automl_v1beta1.types.data_stats import CategoryStats +from google.cloud.automl_v1beta1.types.data_stats import CorrelationStats +from google.cloud.automl_v1beta1.types.data_stats import DataStats +from google.cloud.automl_v1beta1.types.data_stats import Float64Stats +from google.cloud.automl_v1beta1.types.data_stats import StringStats +from google.cloud.automl_v1beta1.types.data_stats import StructStats +from google.cloud.automl_v1beta1.types.data_stats import TimestampStats +from google.cloud.automl_v1beta1.types.data_types import DataType +from google.cloud.automl_v1beta1.types.data_types import StructType +from google.cloud.automl_v1beta1.types.data_types import TypeCode +from google.cloud.automl_v1beta1.types.dataset import Dataset +from google.cloud.automl_v1beta1.types.detection import BoundingBoxMetricsEntry +from google.cloud.automl_v1beta1.types.detection import ImageObjectDetectionAnnotation +from google.cloud.automl_v1beta1.types.detection import ImageObjectDetectionEvaluationMetrics +from google.cloud.automl_v1beta1.types.detection import VideoObjectTrackingAnnotation +from google.cloud.automl_v1beta1.types.detection import VideoObjectTrackingEvaluationMetrics +from google.cloud.automl_v1beta1.types.geometry import BoundingPoly +from google.cloud.automl_v1beta1.types.geometry import NormalizedVertex +from google.cloud.automl_v1beta1.types.image import ImageClassificationDatasetMetadata +from google.cloud.automl_v1beta1.types.image import ImageClassificationModelDeploymentMetadata +from google.cloud.automl_v1beta1.types.image import ImageClassificationModelMetadata +from google.cloud.automl_v1beta1.types.image import ImageObjectDetectionDatasetMetadata +from google.cloud.automl_v1beta1.types.image import ImageObjectDetectionModelDeploymentMetadata +from google.cloud.automl_v1beta1.types.image import 
ImageObjectDetectionModelMetadata +from google.cloud.automl_v1beta1.types.io import BatchPredictInputConfig +from google.cloud.automl_v1beta1.types.io import BatchPredictOutputConfig +from google.cloud.automl_v1beta1.types.io import BigQueryDestination +from google.cloud.automl_v1beta1.types.io import BigQuerySource +from google.cloud.automl_v1beta1.types.io import DocumentInputConfig +from google.cloud.automl_v1beta1.types.io import ExportEvaluatedExamplesOutputConfig +from google.cloud.automl_v1beta1.types.io import GcrDestination +from google.cloud.automl_v1beta1.types.io import GcsDestination +from google.cloud.automl_v1beta1.types.io import GcsSource +from google.cloud.automl_v1beta1.types.io import InputConfig +from google.cloud.automl_v1beta1.types.io import ModelExportOutputConfig +from google.cloud.automl_v1beta1.types.io import OutputConfig +from google.cloud.automl_v1beta1.types.model import Model +from google.cloud.automl_v1beta1.types.model_evaluation import ModelEvaluation +from google.cloud.automl_v1beta1.types.operations import BatchPredictOperationMetadata +from google.cloud.automl_v1beta1.types.operations import CreateModelOperationMetadata +from google.cloud.automl_v1beta1.types.operations import DeleteOperationMetadata +from google.cloud.automl_v1beta1.types.operations import DeployModelOperationMetadata +from google.cloud.automl_v1beta1.types.operations import ExportDataOperationMetadata +from google.cloud.automl_v1beta1.types.operations import ExportEvaluatedExamplesOperationMetadata +from google.cloud.automl_v1beta1.types.operations import ExportModelOperationMetadata +from google.cloud.automl_v1beta1.types.operations import ImportDataOperationMetadata +from google.cloud.automl_v1beta1.types.operations import OperationMetadata +from google.cloud.automl_v1beta1.types.operations import UndeployModelOperationMetadata +from google.cloud.automl_v1beta1.types.prediction_service import BatchPredictRequest +from google.cloud.automl_v1beta1.types.prediction_service import BatchPredictResult +from google.cloud.automl_v1beta1.types.prediction_service import PredictRequest +from google.cloud.automl_v1beta1.types.prediction_service import PredictResponse +from google.cloud.automl_v1beta1.types.ranges import DoubleRange +from google.cloud.automl_v1beta1.types.regression import RegressionEvaluationMetrics +from google.cloud.automl_v1beta1.types.service import CreateDatasetRequest +from google.cloud.automl_v1beta1.types.service import CreateModelRequest +from google.cloud.automl_v1beta1.types.service import DeleteDatasetRequest +from google.cloud.automl_v1beta1.types.service import DeleteModelRequest +from google.cloud.automl_v1beta1.types.service import DeployModelRequest +from google.cloud.automl_v1beta1.types.service import ExportDataRequest +from google.cloud.automl_v1beta1.types.service import ExportEvaluatedExamplesRequest +from google.cloud.automl_v1beta1.types.service import ExportModelRequest +from google.cloud.automl_v1beta1.types.service import GetAnnotationSpecRequest +from google.cloud.automl_v1beta1.types.service import GetColumnSpecRequest +from google.cloud.automl_v1beta1.types.service import GetDatasetRequest +from google.cloud.automl_v1beta1.types.service import GetModelEvaluationRequest +from google.cloud.automl_v1beta1.types.service import GetModelRequest +from google.cloud.automl_v1beta1.types.service import GetTableSpecRequest +from google.cloud.automl_v1beta1.types.service import ImportDataRequest +from google.cloud.automl_v1beta1.types.service import 
ListColumnSpecsRequest +from google.cloud.automl_v1beta1.types.service import ListColumnSpecsResponse +from google.cloud.automl_v1beta1.types.service import ListDatasetsRequest +from google.cloud.automl_v1beta1.types.service import ListDatasetsResponse +from google.cloud.automl_v1beta1.types.service import ListModelEvaluationsRequest +from google.cloud.automl_v1beta1.types.service import ListModelEvaluationsResponse +from google.cloud.automl_v1beta1.types.service import ListModelsRequest +from google.cloud.automl_v1beta1.types.service import ListModelsResponse +from google.cloud.automl_v1beta1.types.service import ListTableSpecsRequest +from google.cloud.automl_v1beta1.types.service import ListTableSpecsResponse +from google.cloud.automl_v1beta1.types.service import UndeployModelRequest +from google.cloud.automl_v1beta1.types.service import UpdateColumnSpecRequest +from google.cloud.automl_v1beta1.types.service import UpdateDatasetRequest +from google.cloud.automl_v1beta1.types.service import UpdateTableSpecRequest +from google.cloud.automl_v1beta1.types.table_spec import TableSpec +from google.cloud.automl_v1beta1.types.tables import TablesAnnotation +from google.cloud.automl_v1beta1.types.tables import TablesDatasetMetadata +from google.cloud.automl_v1beta1.types.tables import TablesModelColumnInfo +from google.cloud.automl_v1beta1.types.tables import TablesModelMetadata +from google.cloud.automl_v1beta1.types.temporal import TimeSegment +from google.cloud.automl_v1beta1.types.text import TextClassificationDatasetMetadata +from google.cloud.automl_v1beta1.types.text import TextClassificationModelMetadata +from google.cloud.automl_v1beta1.types.text import TextExtractionDatasetMetadata +from google.cloud.automl_v1beta1.types.text import TextExtractionModelMetadata +from google.cloud.automl_v1beta1.types.text import TextSentimentDatasetMetadata +from google.cloud.automl_v1beta1.types.text import TextSentimentModelMetadata +from google.cloud.automl_v1beta1.types.text_extraction import TextExtractionAnnotation +from google.cloud.automl_v1beta1.types.text_extraction import TextExtractionEvaluationMetrics +from google.cloud.automl_v1beta1.types.text_segment import TextSegment +from google.cloud.automl_v1beta1.types.text_sentiment import TextSentimentAnnotation +from google.cloud.automl_v1beta1.types.text_sentiment import TextSentimentEvaluationMetrics +from google.cloud.automl_v1beta1.types.translation import TranslationAnnotation +from google.cloud.automl_v1beta1.types.translation import TranslationDatasetMetadata +from google.cloud.automl_v1beta1.types.translation import TranslationEvaluationMetrics +from google.cloud.automl_v1beta1.types.translation import TranslationModelMetadata +from google.cloud.automl_v1beta1.types.video import VideoClassificationDatasetMetadata +from google.cloud.automl_v1beta1.types.video import VideoClassificationModelMetadata +from google.cloud.automl_v1beta1.types.video import VideoObjectTrackingDatasetMetadata +from google.cloud.automl_v1beta1.types.video import VideoObjectTrackingModelMetadata + +__all__ = ('AutoMlClient', + 'AutoMlAsyncClient', + 'PredictionServiceClient', + 'PredictionServiceAsyncClient', + 'AnnotationPayload', + 'AnnotationSpec', + 'ClassificationAnnotation', + 'ClassificationEvaluationMetrics', + 'VideoClassificationAnnotation', + 'ClassificationType', + 'ColumnSpec', + 'Document', + 'DocumentDimensions', + 'ExamplePayload', + 'Image', + 'Row', + 'TextSnippet', + 'ArrayStats', + 'CategoryStats', + 'CorrelationStats', + 'DataStats', + 
'Float64Stats', + 'StringStats', + 'StructStats', + 'TimestampStats', + 'DataType', + 'StructType', + 'TypeCode', + 'Dataset', + 'BoundingBoxMetricsEntry', + 'ImageObjectDetectionAnnotation', + 'ImageObjectDetectionEvaluationMetrics', + 'VideoObjectTrackingAnnotation', + 'VideoObjectTrackingEvaluationMetrics', + 'BoundingPoly', + 'NormalizedVertex', + 'ImageClassificationDatasetMetadata', + 'ImageClassificationModelDeploymentMetadata', + 'ImageClassificationModelMetadata', + 'ImageObjectDetectionDatasetMetadata', + 'ImageObjectDetectionModelDeploymentMetadata', + 'ImageObjectDetectionModelMetadata', + 'BatchPredictInputConfig', + 'BatchPredictOutputConfig', + 'BigQueryDestination', + 'BigQuerySource', + 'DocumentInputConfig', + 'ExportEvaluatedExamplesOutputConfig', + 'GcrDestination', + 'GcsDestination', + 'GcsSource', + 'InputConfig', + 'ModelExportOutputConfig', + 'OutputConfig', + 'Model', + 'ModelEvaluation', + 'BatchPredictOperationMetadata', + 'CreateModelOperationMetadata', + 'DeleteOperationMetadata', + 'DeployModelOperationMetadata', + 'ExportDataOperationMetadata', + 'ExportEvaluatedExamplesOperationMetadata', + 'ExportModelOperationMetadata', + 'ImportDataOperationMetadata', + 'OperationMetadata', + 'UndeployModelOperationMetadata', + 'BatchPredictRequest', + 'BatchPredictResult', + 'PredictRequest', + 'PredictResponse', + 'DoubleRange', + 'RegressionEvaluationMetrics', + 'CreateDatasetRequest', + 'CreateModelRequest', + 'DeleteDatasetRequest', + 'DeleteModelRequest', + 'DeployModelRequest', + 'ExportDataRequest', + 'ExportEvaluatedExamplesRequest', + 'ExportModelRequest', + 'GetAnnotationSpecRequest', + 'GetColumnSpecRequest', + 'GetDatasetRequest', + 'GetModelEvaluationRequest', + 'GetModelRequest', + 'GetTableSpecRequest', + 'ImportDataRequest', + 'ListColumnSpecsRequest', + 'ListColumnSpecsResponse', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'ListTableSpecsRequest', + 'ListTableSpecsResponse', + 'UndeployModelRequest', + 'UpdateColumnSpecRequest', + 'UpdateDatasetRequest', + 'UpdateTableSpecRequest', + 'TableSpec', + 'TablesAnnotation', + 'TablesDatasetMetadata', + 'TablesModelColumnInfo', + 'TablesModelMetadata', + 'TimeSegment', + 'TextClassificationDatasetMetadata', + 'TextClassificationModelMetadata', + 'TextExtractionDatasetMetadata', + 'TextExtractionModelMetadata', + 'TextSentimentDatasetMetadata', + 'TextSentimentModelMetadata', + 'TextExtractionAnnotation', + 'TextExtractionEvaluationMetrics', + 'TextSegment', + 'TextSentimentAnnotation', + 'TextSentimentEvaluationMetrics', + 'TranslationAnnotation', + 'TranslationDatasetMetadata', + 'TranslationEvaluationMetrics', + 'TranslationModelMetadata', + 'VideoClassificationDatasetMetadata', + 'VideoClassificationModelMetadata', + 'VideoObjectTrackingDatasetMetadata', + 'VideoObjectTrackingModelMetadata', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl/gapic_version.py b/owl-bot-staging/v1beta1/google/cloud/automl/gapic_version.py new file mode 100644 index 00000000..360a0d13 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/v1beta1/google/cloud/automl/py.typed b/owl-bot-staging/v1beta1/google/cloud/automl/py.typed new file mode 100644 index 00000000..0560ba18 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-automl package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/__init__.py new file mode 100644 index 00000000..16ca8585 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/__init__.py @@ -0,0 +1,276 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.cloud.automl_v1beta1 import gapic_version as package_version + +__version__ = package_version.__version__ + + +from .services.auto_ml import AutoMlClient +from .services.auto_ml import AutoMlAsyncClient +from .services.prediction_service import PredictionServiceClient +from .services.prediction_service import PredictionServiceAsyncClient + +from .types.annotation_payload import AnnotationPayload +from .types.annotation_spec import AnnotationSpec +from .types.classification import ClassificationAnnotation +from .types.classification import ClassificationEvaluationMetrics +from .types.classification import VideoClassificationAnnotation +from .types.classification import ClassificationType +from .types.column_spec import ColumnSpec +from .types.data_items import Document +from .types.data_items import DocumentDimensions +from .types.data_items import ExamplePayload +from .types.data_items import Image +from .types.data_items import Row +from .types.data_items import TextSnippet +from .types.data_stats import ArrayStats +from .types.data_stats import CategoryStats +from .types.data_stats import CorrelationStats +from .types.data_stats import DataStats +from .types.data_stats import Float64Stats +from .types.data_stats import StringStats +from .types.data_stats import StructStats +from .types.data_stats import TimestampStats +from .types.data_types import DataType +from .types.data_types import StructType +from .types.data_types import TypeCode +from .types.dataset import Dataset +from .types.detection import BoundingBoxMetricsEntry +from .types.detection import ImageObjectDetectionAnnotation +from .types.detection import ImageObjectDetectionEvaluationMetrics +from .types.detection import VideoObjectTrackingAnnotation +from .types.detection import VideoObjectTrackingEvaluationMetrics +from 
.types.geometry import BoundingPoly +from .types.geometry import NormalizedVertex +from .types.image import ImageClassificationDatasetMetadata +from .types.image import ImageClassificationModelDeploymentMetadata +from .types.image import ImageClassificationModelMetadata +from .types.image import ImageObjectDetectionDatasetMetadata +from .types.image import ImageObjectDetectionModelDeploymentMetadata +from .types.image import ImageObjectDetectionModelMetadata +from .types.io import BatchPredictInputConfig +from .types.io import BatchPredictOutputConfig +from .types.io import BigQueryDestination +from .types.io import BigQuerySource +from .types.io import DocumentInputConfig +from .types.io import ExportEvaluatedExamplesOutputConfig +from .types.io import GcrDestination +from .types.io import GcsDestination +from .types.io import GcsSource +from .types.io import InputConfig +from .types.io import ModelExportOutputConfig +from .types.io import OutputConfig +from .types.model import Model +from .types.model_evaluation import ModelEvaluation +from .types.operations import BatchPredictOperationMetadata +from .types.operations import CreateModelOperationMetadata +from .types.operations import DeleteOperationMetadata +from .types.operations import DeployModelOperationMetadata +from .types.operations import ExportDataOperationMetadata +from .types.operations import ExportEvaluatedExamplesOperationMetadata +from .types.operations import ExportModelOperationMetadata +from .types.operations import ImportDataOperationMetadata +from .types.operations import OperationMetadata +from .types.operations import UndeployModelOperationMetadata +from .types.prediction_service import BatchPredictRequest +from .types.prediction_service import BatchPredictResult +from .types.prediction_service import PredictRequest +from .types.prediction_service import PredictResponse +from .types.ranges import DoubleRange +from .types.regression import RegressionEvaluationMetrics +from .types.service import CreateDatasetRequest +from .types.service import CreateModelRequest +from .types.service import DeleteDatasetRequest +from .types.service import DeleteModelRequest +from .types.service import DeployModelRequest +from .types.service import ExportDataRequest +from .types.service import ExportEvaluatedExamplesRequest +from .types.service import ExportModelRequest +from .types.service import GetAnnotationSpecRequest +from .types.service import GetColumnSpecRequest +from .types.service import GetDatasetRequest +from .types.service import GetModelEvaluationRequest +from .types.service import GetModelRequest +from .types.service import GetTableSpecRequest +from .types.service import ImportDataRequest +from .types.service import ListColumnSpecsRequest +from .types.service import ListColumnSpecsResponse +from .types.service import ListDatasetsRequest +from .types.service import ListDatasetsResponse +from .types.service import ListModelEvaluationsRequest +from .types.service import ListModelEvaluationsResponse +from .types.service import ListModelsRequest +from .types.service import ListModelsResponse +from .types.service import ListTableSpecsRequest +from .types.service import ListTableSpecsResponse +from .types.service import UndeployModelRequest +from .types.service import UpdateColumnSpecRequest +from .types.service import UpdateDatasetRequest +from .types.service import UpdateTableSpecRequest +from .types.table_spec import TableSpec +from .types.tables import TablesAnnotation +from .types.tables import TablesDatasetMetadata +from 
.types.tables import TablesModelColumnInfo +from .types.tables import TablesModelMetadata +from .types.temporal import TimeSegment +from .types.text import TextClassificationDatasetMetadata +from .types.text import TextClassificationModelMetadata +from .types.text import TextExtractionDatasetMetadata +from .types.text import TextExtractionModelMetadata +from .types.text import TextSentimentDatasetMetadata +from .types.text import TextSentimentModelMetadata +from .types.text_extraction import TextExtractionAnnotation +from .types.text_extraction import TextExtractionEvaluationMetrics +from .types.text_segment import TextSegment +from .types.text_sentiment import TextSentimentAnnotation +from .types.text_sentiment import TextSentimentEvaluationMetrics +from .types.translation import TranslationAnnotation +from .types.translation import TranslationDatasetMetadata +from .types.translation import TranslationEvaluationMetrics +from .types.translation import TranslationModelMetadata +from .types.video import VideoClassificationDatasetMetadata +from .types.video import VideoClassificationModelMetadata +from .types.video import VideoObjectTrackingDatasetMetadata +from .types.video import VideoObjectTrackingModelMetadata + +__all__ = ( + 'AutoMlAsyncClient', + 'PredictionServiceAsyncClient', +'AnnotationPayload', +'AnnotationSpec', +'ArrayStats', +'AutoMlClient', +'BatchPredictInputConfig', +'BatchPredictOperationMetadata', +'BatchPredictOutputConfig', +'BatchPredictRequest', +'BatchPredictResult', +'BigQueryDestination', +'BigQuerySource', +'BoundingBoxMetricsEntry', +'BoundingPoly', +'CategoryStats', +'ClassificationAnnotation', +'ClassificationEvaluationMetrics', +'ClassificationType', +'ColumnSpec', +'CorrelationStats', +'CreateDatasetRequest', +'CreateModelOperationMetadata', +'CreateModelRequest', +'DataStats', +'DataType', +'Dataset', +'DeleteDatasetRequest', +'DeleteModelRequest', +'DeleteOperationMetadata', +'DeployModelOperationMetadata', +'DeployModelRequest', +'Document', +'DocumentDimensions', +'DocumentInputConfig', +'DoubleRange', +'ExamplePayload', +'ExportDataOperationMetadata', +'ExportDataRequest', +'ExportEvaluatedExamplesOperationMetadata', +'ExportEvaluatedExamplesOutputConfig', +'ExportEvaluatedExamplesRequest', +'ExportModelOperationMetadata', +'ExportModelRequest', +'Float64Stats', +'GcrDestination', +'GcsDestination', +'GcsSource', +'GetAnnotationSpecRequest', +'GetColumnSpecRequest', +'GetDatasetRequest', +'GetModelEvaluationRequest', +'GetModelRequest', +'GetTableSpecRequest', +'Image', +'ImageClassificationDatasetMetadata', +'ImageClassificationModelDeploymentMetadata', +'ImageClassificationModelMetadata', +'ImageObjectDetectionAnnotation', +'ImageObjectDetectionDatasetMetadata', +'ImageObjectDetectionEvaluationMetrics', +'ImageObjectDetectionModelDeploymentMetadata', +'ImageObjectDetectionModelMetadata', +'ImportDataOperationMetadata', +'ImportDataRequest', +'InputConfig', +'ListColumnSpecsRequest', +'ListColumnSpecsResponse', +'ListDatasetsRequest', +'ListDatasetsResponse', +'ListModelEvaluationsRequest', +'ListModelEvaluationsResponse', +'ListModelsRequest', +'ListModelsResponse', +'ListTableSpecsRequest', +'ListTableSpecsResponse', +'Model', +'ModelEvaluation', +'ModelExportOutputConfig', +'NormalizedVertex', +'OperationMetadata', +'OutputConfig', +'PredictRequest', +'PredictResponse', +'PredictionServiceClient', +'RegressionEvaluationMetrics', +'Row', +'StringStats', +'StructStats', +'StructType', +'TableSpec', +'TablesAnnotation', +'TablesDatasetMetadata', 
+'TablesModelColumnInfo', +'TablesModelMetadata', +'TextClassificationDatasetMetadata', +'TextClassificationModelMetadata', +'TextExtractionAnnotation', +'TextExtractionDatasetMetadata', +'TextExtractionEvaluationMetrics', +'TextExtractionModelMetadata', +'TextSegment', +'TextSentimentAnnotation', +'TextSentimentDatasetMetadata', +'TextSentimentEvaluationMetrics', +'TextSentimentModelMetadata', +'TextSnippet', +'TimeSegment', +'TimestampStats', +'TranslationAnnotation', +'TranslationDatasetMetadata', +'TranslationEvaluationMetrics', +'TranslationModelMetadata', +'TypeCode', +'UndeployModelOperationMetadata', +'UndeployModelRequest', +'UpdateColumnSpecRequest', +'UpdateDatasetRequest', +'UpdateTableSpecRequest', +'VideoClassificationAnnotation', +'VideoClassificationDatasetMetadata', +'VideoClassificationModelMetadata', +'VideoObjectTrackingAnnotation', +'VideoObjectTrackingDatasetMetadata', +'VideoObjectTrackingEvaluationMetrics', +'VideoObjectTrackingModelMetadata', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_metadata.json new file mode 100644 index 00000000..74e85289 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_metadata.json @@ -0,0 +1,437 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.automl_v1beta1", + "protoPackage": "google.cloud.automl.v1beta1", + "schema": "1.0", + "services": { + "AutoMl": { + "clients": { + "grpc": { + "libraryClient": "AutoMlClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "CreateModel": { + "methods": [ + "create_model" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "ExportEvaluatedExamples": { + "methods": [ + "export_evaluated_examples" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetColumnSpec": { + "methods": [ + "get_column_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetTableSpec": { + "methods": [ + "get_table_spec" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListColumnSpecs": { + "methods": [ + "list_column_specs" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "ListTableSpecs": { + "methods": [ + "list_table_specs" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateColumnSpec": { + "methods": [ + "update_column_spec" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + }, + "UpdateTableSpec": { + "methods": [ + "update_table_spec" + ] + } + } + }, + "grpc-async": { + "libraryClient": "AutoMlAsyncClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "CreateModel": { + "methods": [ + "create_model" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "DeployModel": { + 
"methods": [ + "deploy_model" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "ExportEvaluatedExamples": { + "methods": [ + "export_evaluated_examples" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetColumnSpec": { + "methods": [ + "get_column_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetTableSpec": { + "methods": [ + "get_table_spec" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListColumnSpecs": { + "methods": [ + "list_column_specs" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "ListTableSpecs": { + "methods": [ + "list_table_specs" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateColumnSpec": { + "methods": [ + "update_column_spec" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + }, + "UpdateTableSpec": { + "methods": [ + "update_table_spec" + ] + } + } + }, + "rest": { + "libraryClient": "AutoMlClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "CreateModel": { + "methods": [ + "create_model" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "ExportEvaluatedExamples": { + "methods": [ + "export_evaluated_examples" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetColumnSpec": { + "methods": [ + "get_column_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetTableSpec": { + "methods": [ + "get_table_spec" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListColumnSpecs": { + "methods": [ + "list_column_specs" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "ListTableSpecs": { + "methods": [ + "list_table_specs" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateColumnSpec": { + "methods": [ + "update_column_spec" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + }, + "UpdateTableSpec": { + "methods": [ + "update_table_spec" + ] + } + } + } + } + }, + "PredictionService": { + "clients": { + "grpc": { + "libraryClient": "PredictionServiceClient", + "rpcs": { + "BatchPredict": { + "methods": [ + "batch_predict" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PredictionServiceAsyncClient", + "rpcs": { + "BatchPredict": { + "methods": [ + "batch_predict" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + } + } + }, + "rest": { + "libraryClient": "PredictionServiceClient", + "rpcs": { + "BatchPredict": { + "methods": [ + "batch_predict" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + } + } + 
} + } + } + } +} diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_version.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_version.py new file mode 100644 index 00000000..360a0d13 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/py.typed new file mode 100644 index 00000000..0560ba18 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-automl package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/__init__.py new file mode 100644 index 00000000..89a37dc9 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/__init__.py new file mode 100644 index 00000000..8f53357e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import AutoMlClient +from .async_client import AutoMlAsyncClient + +__all__ = ( + 'AutoMlClient', + 'AutoMlAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/async_client.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/async_client.py new file mode 100644 index 00000000..b905c3da --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/async_client.py @@ -0,0 +1,3170 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union + +from google.cloud.automl_v1beta1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.automl_v1beta1.services.auto_ml import pagers +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import classification +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import data_stats +from google.cloud.automl_v1beta1.types import data_types +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import detection +from google.cloud.automl_v1beta1.types import image +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model as gca_model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import regression +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.cloud.automl_v1beta1.types import tables +from google.cloud.automl_v1beta1.types import text +from google.cloud.automl_v1beta1.types import text_extraction +from google.cloud.automl_v1beta1.types import text_sentiment +from google.cloud.automl_v1beta1.types import translation +from google.cloud.automl_v1beta1.types import video +from 
google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport +from .client import AutoMlClient + + +class AutoMlAsyncClient: + """AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. + For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + """ + + _client: AutoMlClient + + DEFAULT_ENDPOINT = AutoMlClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = AutoMlClient.DEFAULT_MTLS_ENDPOINT + + annotation_spec_path = staticmethod(AutoMlClient.annotation_spec_path) + parse_annotation_spec_path = staticmethod(AutoMlClient.parse_annotation_spec_path) + column_spec_path = staticmethod(AutoMlClient.column_spec_path) + parse_column_spec_path = staticmethod(AutoMlClient.parse_column_spec_path) + dataset_path = staticmethod(AutoMlClient.dataset_path) + parse_dataset_path = staticmethod(AutoMlClient.parse_dataset_path) + model_path = staticmethod(AutoMlClient.model_path) + parse_model_path = staticmethod(AutoMlClient.parse_model_path) + model_evaluation_path = staticmethod(AutoMlClient.model_evaluation_path) + parse_model_evaluation_path = staticmethod(AutoMlClient.parse_model_evaluation_path) + table_spec_path = staticmethod(AutoMlClient.table_spec_path) + parse_table_spec_path = staticmethod(AutoMlClient.parse_table_spec_path) + common_billing_account_path = staticmethod(AutoMlClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(AutoMlClient.parse_common_billing_account_path) + common_folder_path = staticmethod(AutoMlClient.common_folder_path) + parse_common_folder_path = staticmethod(AutoMlClient.parse_common_folder_path) + common_organization_path = staticmethod(AutoMlClient.common_organization_path) + parse_common_organization_path = staticmethod(AutoMlClient.parse_common_organization_path) + common_project_path = staticmethod(AutoMlClient.common_project_path) + parse_common_project_path = staticmethod(AutoMlClient.parse_common_project_path) + common_location_path = staticmethod(AutoMlClient.common_location_path) + parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlAsyncClient: The constructed client. + """ + return AutoMlClient.from_service_account_info.__func__(AutoMlAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlAsyncClient: The constructed client. + """ + return AutoMlClient.from_service_account_file.__func__(AutoMlAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return AutoMlClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> AutoMlTransport: + """Returns the transport used by the client instance. + + Returns: + AutoMlTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(AutoMlClient).get_transport_class, type(AutoMlClient)) + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, AutoMlTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the auto ml client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AutoMlTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = AutoMlClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_dataset(self, + request: Optional[Union[service.CreateDatasetRequest, dict]] = None, + *, + parent: Optional[str] = None, + dataset: Optional[gca_dataset.Dataset] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Creates a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_create_dataset(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + dataset = automl_v1beta1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1beta1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + response = await client.create_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.CreateDatasetRequest, dict]]): + The request object. Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. + parent (:class:`str`): + Required. The resource name of the + project to create the dataset for. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (:class:`google.cloud.automl_v1beta1.types.Dataset`): + Required. The dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.CreateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_dataset(self, + request: Optional[Union[service.GetDatasetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_get_dataset(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = await client.get_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.GetDatasetRequest, dict]]): + The request object. Request message for + [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. + name (:class:`str`): + Required. The resource name of the + dataset to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.GetDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_dataset, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_datasets(self, + request: Optional[Union[service.ListDatasetsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsAsyncPager: + r"""Lists datasets in a project. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_list_datasets(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.ListDatasetsRequest, dict]]): + The request object. Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + parent (:class:`str`): + Required. The resource name of the + project from which to list datasets. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsAsyncPager: + Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ListDatasetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_datasets, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDatasetsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_dataset(self, + request: Optional[Union[service.UpdateDatasetRequest, dict]] = None, + *, + dataset: Optional[gca_dataset.Dataset] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_update_dataset(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + dataset = automl_v1beta1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1beta1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = await client.update_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.UpdateDatasetRequest, dict]]): + The request object. Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] + dataset (:class:`google.cloud.automl_v1beta1.types.Dataset`): + Required. The dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.UpdateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_dataset(self, + request: Optional[Union[service.DeleteDatasetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_delete_dataset(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.DeleteDatasetRequest, dict]]): + The request object. Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. + name (:class:`str`): + Required. The resource name of the + dataset to delete. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.DeleteDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_dataset, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def import_data(self, + request: Optional[Union[service.ImportDataRequest, dict]] = None, + *, + name: Optional[str] = None, + input_config: Optional[io.InputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_import_data(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ImportDataRequest( + name="name_value", + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.ImportDataRequest, dict]]): + The request object. Request message for + [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. + name (:class:`str`): + Required. Dataset name. Dataset must + already exist. All imported annotations + and examples will be added. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (:class:`google.cloud.automl_v1beta1.types.InputConfig`): + Required. The desired input location + and its domain specific semantics, if + any. + + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ImportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_data, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def export_data(self, + request: Optional[Union[service.ExportDataRequest, dict]] = None, + *, + name: Optional[str] = None, + output_config: Optional[io.OutputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_export_data(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportDataRequest( + name="name_value", + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.ExportDataRequest, dict]]): + The request object. Request message for + [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. + name (:class:`str`): + Required. The resource name of the + dataset. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`google.cloud.automl_v1beta1.types.OutputConfig`): + Required. The desired output + location. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ExportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_data, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_annotation_spec(self, + request: Optional[Union[service.GetAnnotationSpecRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an annotation spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_get_annotation_spec(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_annotation_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest, dict]]): + The request object. Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. + name (:class:`str`): + Required. The resource name of the + annotation spec to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.AnnotationSpec: + A definition of an annotation spec. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.GetAnnotationSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_annotation_spec, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_table_spec(self, + request: Optional[Union[service.GetTableSpecRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table_spec.TableSpec: + r"""Gets a table spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_get_table_spec(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetTableSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_table_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.GetTableSpecRequest, dict]]): + The request object. Request message for + [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. + name (:class:`str`): + Required. The resource name of the + table spec to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.TableSpec: + A specification of a relational table. + The table's schema is represented via its child + column specs. It is pre-populated as part of + ImportData by schema inference algorithm, the version + of which is a required parameter of ImportData + InputConfig. Note: While working with a table, at + times the schema may be inconsistent with the data in + the table (e.g. string in a FLOAT64 column). 
The + consistency validation is done upon creation of a + model. Used by: \* Tables + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.GetTableSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_table_spec, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_table_specs(self, + request: Optional[Union[service.ListTableSpecsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTableSpecsAsyncPager: + r"""Lists table specs in a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_list_table_specs(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListTableSpecsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_table_specs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.ListTableSpecsRequest, dict]]): + The request object. Request message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + parent (:class:`str`): + Required. The resource name of the + dataset to list table specs from. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsAsyncPager: + Response message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ListTableSpecsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_table_specs, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTableSpecsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_table_spec(self, + request: Optional[Union[service.UpdateTableSpecRequest, dict]] = None, + *, + table_spec: Optional[gca_table_spec.TableSpec] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_table_spec.TableSpec: + r"""Updates a table spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_update_table_spec(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.UpdateTableSpecRequest( + ) + + # Make the request + response = await client.update_table_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.UpdateTableSpecRequest, dict]]): + The request object. Request message for + [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] + table_spec (:class:`google.cloud.automl_v1beta1.types.TableSpec`): + Required. The table spec which + replaces the resource on the server. 
+ + This corresponds to the ``table_spec`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.TableSpec: + A specification of a relational table. + The table's schema is represented via its child + column specs. It is pre-populated as part of + ImportData by schema inference algorithm, the version + of which is a required parameter of ImportData + InputConfig. Note: While working with a table, at + times the schema may be inconsistent with the data in + the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a + model. Used by: \* Tables + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_spec]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.UpdateTableSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_spec is not None: + request.table_spec = table_spec + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_table_spec, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("table_spec.name", request.table_spec.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_column_spec(self, + request: Optional[Union[service.GetColumnSpecRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> column_spec.ColumnSpec: + r"""Gets a column spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_get_column_spec(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetColumnSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_column_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.GetColumnSpecRequest, dict]]): + The request object. 
Request message for + [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. + name (:class:`str`): + Required. The resource name of the + column spec to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.ColumnSpec: + A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were + given on import . Used by: \* Tables + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.GetColumnSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_column_spec, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_column_specs(self, + request: Optional[Union[service.ListColumnSpecsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListColumnSpecsAsyncPager: + r"""Lists column specs in a table spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_list_column_specs(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListColumnSpecsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_column_specs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.ListColumnSpecsRequest, dict]]): + The request object. Request message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + parent (:class:`str`): + Required. The resource name of the + table spec to list column specs from. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsAsyncPager: + Response message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ListColumnSpecsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_column_specs, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListColumnSpecsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_column_spec(self, + request: Optional[Union[service.UpdateColumnSpecRequest, dict]] = None, + *, + column_spec: Optional[gca_column_spec.ColumnSpec] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_column_spec.ColumnSpec: + r"""Updates a column spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_update_column_spec(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.UpdateColumnSpecRequest( + ) + + # Make the request + response = await client.update_column_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest, dict]]): + The request object. Request message for + [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] + column_spec (:class:`google.cloud.automl_v1beta1.types.ColumnSpec`): + Required. The column spec which + replaces the resource on the server. + + This corresponds to the ``column_spec`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.ColumnSpec: + A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were + given on import . Used by: \* Tables + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([column_spec]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.UpdateColumnSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if column_spec is not None: + request.column_spec = column_spec + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_column_spec, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("column_spec.name", request.column_spec.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def create_model(self, + request: Optional[Union[service.CreateModelRequest, dict]] = None, + *, + parent: Optional[str] = None, + model: Optional[gca_model.Model] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_create_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.CreateModelRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.CreateModelRequest, dict]]): + The request object. Request message for + [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. + parent (:class:`str`): + Required. Resource name of the parent + project where the model is being + created. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (:class:`google.cloud.automl_v1beta1.types.Model`): + Required. The model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.automl_v1beta1.types.Model` API + proto representing a trained machine learning model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.CreateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
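+        # Unlike the read-only Get/List calls, this mutating long-running
+        # RPC is wrapped without a default retry policy (presumably because
+        # CreateModel is not idempotent); only the 5.0s default timeout
+        # applies unless the caller supplies an explicit `retry`.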
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_model.Model, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model(self, + request: Optional[Union[service.GetModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_get_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.GetModelRequest, dict]]): + The request object. Request message for + [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. + name (:class:`str`): + Required. Resource name of the model. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.Model: + API proto representing a trained + machine learning model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
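+        # The default retry below uses exponential backoff starting at
+        # 0.1s, growing by a factor of 1.3 up to a 60s ceiling; it retries
+        # only DeadlineExceeded and ServiceUnavailable errors and gives up
+        # once the 5.0s deadline is exhausted.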
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_models(self, + request: Optional[Union[service.ListModelsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsAsyncPager: + r"""Lists models. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_list_models(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.ListModelsRequest, dict]]): + The request object. Request message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + parent (:class:`str`): + Required. Resource name of the + project, from which to list the models. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsAsyncPager: + Response message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_models, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_model(self, + request: Optional[Union[service.DeleteModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_delete_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.DeleteModelRequest, dict]]): + The request object. Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. + name (:class:`str`): + Required. Resource name of the model + being deleted. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to
+                use it as the request or the response type of an API
+                method. For instance:
+
+                   service Foo {
+                      rpc Bar(google.protobuf.Empty) returns
+                   (google.protobuf.Empty);
+
+                   }
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = service.DeleteModelRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.delete_model,
+            default_retry=retries.Retry(
+initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
+                core_exceptions.DeadlineExceeded,
+                core_exceptions.ServiceUnavailable,
+            ),
+            deadline=5.0,
+            ),
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=operations.OperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def deploy_model(self,
+            request: Optional[Union[service.DeployModelRequest, dict]] = None,
+            *,
+            name: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Deploys a model. If a model is already deployed, deploying it
+        with the same parameters has no effect. Deploying with different
+        parameters (e.g., changing
+
+        [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
+        will reset the deployment state without pausing the model's
+        availability.
+
+        Only applicable for Text Classification, Image Object Detection,
+        Tables, and Image Segmentation; all other domains manage
+        deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_deploy_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.DeployModelRequest, dict]]): + The request object. Request message for + [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. + name (:class:`str`): + Required. Resource name of the model + to deploy. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.DeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.deploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def undeploy_model(self, + request: Optional[Union[service.UndeployModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys a model. If the model is not deployed this method has + no effect. + + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_undeploy_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.UndeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.UndeployModelRequest, dict]]): + The request object. Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. + name (:class:`str`): + Required. Resource name of the model + to undeploy. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.UndeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def export_model(self, + request: Optional[Union[service.ExportModelRequest, dict]] = None, + *, + name: Optional[str] = None, + output_config: Optional[io.ModelExportOutputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports a trained, "export-able", model to a user specified + Google Cloud Storage location. A model is considered export-able + if and only if it has an export format defined for it in + + [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_export_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.ExportModelRequest, dict]]): + The request object. Request message for + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an + error code will be returned. + name (:class:`str`): + Required. The resource name of the + model to export. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`google.cloud.automl_v1beta1.types.ModelExportOutputConfig`): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ExportModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def export_evaluated_examples(self, + request: Optional[Union[service.ExportEvaluatedExamplesRequest, dict]] = None, + *, + name: Optional[str] = None, + output_config: Optional[io.ExportEvaluatedExamplesOutputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports examples on which the model was evaluated (i.e. which + were in the TEST set of the dataset the model was created from), + together with their ground truth annotations and the annotations + created (predicted) by the model. The examples, ground truth and + predictions are exported in the state they were at the moment + the model was evaluated. + + This export is available only for 30 days since the model + evaluation is created. + + Currently only available for Tables. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_export_evaluated_examples(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportEvaluatedExamplesRequest( + name="name_value", + ) + + # Make the request + operation = client.export_evaluated_examples(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest, dict]]): + The request object. Request message for + [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. + name (:class:`str`): + Required. The resource name of the + model whose evaluated examples are to be + exported. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig`): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ExportEvaluatedExamplesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_evaluated_examples, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
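+        # `from_gapic` wraps the raw longrunning.Operation in an
+        # AsyncOperation so callers can `await response.result()`; the
+        # final result is an empty_pb2.Empty and progress is surfaced via
+        # operations.OperationMetadata.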
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation(self, + request: Optional[Union[service.GetModelEvaluationRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a model evaluation. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_get_model_evaluation(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.GetModelEvaluationRequest, dict]]): + The request object. Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. + name (:class:`str`): + Required. Resource name for the model + evaluation. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.ModelEvaluation: + Evaluation results of a model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.GetModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_evaluations(self, + request: Optional[Union[service.ListModelEvaluationsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsAsyncPager: + r"""Lists model evaluations. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_list_model_evaluations(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest, dict]]): + The request object. Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + parent (:class:`str`): + Required. Resource name of the model + to list the model evaluations for. If + modelId is set as "-", this will list + model evaluations from across all models + of the parent location. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager: + Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = service.ListModelEvaluationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluations, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
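+        # `to_grpc_metadata` packs ("parent", request.parent) into the
+        # x-goog-request-params header, which the service uses to route
+        # the call to the correct regional backend.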
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "AutoMlAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "AutoMlAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/client.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/client.py new file mode 100644 index 00000000..e738a43f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/client.py @@ -0,0 +1,3335 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast + +from google.cloud.automl_v1beta1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.automl_v1beta1.services.auto_ml import pagers +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import classification +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import data_stats +from google.cloud.automl_v1beta1.types import data_types +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import detection +from google.cloud.automl_v1beta1.types import image +from 
google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model as gca_model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import regression +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.cloud.automl_v1beta1.types import tables +from google.cloud.automl_v1beta1.types import text +from google.cloud.automl_v1beta1.types import text_extraction +from google.cloud.automl_v1beta1.types import text_sentiment +from google.cloud.automl_v1beta1.types import translation +from google.cloud.automl_v1beta1.types import video +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import AutoMlGrpcTransport +from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport +from .transports.rest import AutoMlRestTransport + + +class AutoMlClientMeta(type): + """Metaclass for the AutoMl client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[AutoMlTransport]] + _transport_registry["grpc"] = AutoMlGrpcTransport + _transport_registry["grpc_asyncio"] = AutoMlGrpcAsyncIOTransport + _transport_registry["rest"] = AutoMlRestTransport + + def get_transport_class(cls, + label: Optional[str] = None, + ) -> Type[AutoMlTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class AutoMlClient(metaclass=AutoMlClientMeta): + """AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. + For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "automl.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> AutoMlTransport: + """Returns the transport used by the client instance. + + Returns: + AutoMlTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str: + """Returns a fully-qualified annotation_spec string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) + + @staticmethod + def parse_annotation_spec_path(path: str) -> Dict[str,str]: + """Parses a annotation_spec path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/annotationSpecs/(?P<annotation_spec>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def column_spec_path(project: str,location: str,dataset: str,table_spec: str,column_spec: str,) -> str: + """Returns a fully-qualified column_spec string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}".format(project=project, location=location, dataset=dataset, table_spec=table_spec, column_spec=column_spec, ) + + @staticmethod + def parse_column_spec_path(path: str) -> Dict[str,str]: + """Parses a column_spec path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/tableSpecs/(?P<table_spec>.+?)/columnSpecs/(?P<column_spec>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def dataset_path(project: str,location: str,dataset: str,) -> str: + """Returns a fully-qualified dataset string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str,str]: + """Parses a dataset path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_evaluation_path(project: str,location: str,model: str,model_evaluation: str,) -> str: + """Returns a fully-qualified model_evaluation string.""" + return "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(project=project, location=location, model=model, model_evaluation=model_evaluation, ) + + @staticmethod + def parse_model_evaluation_path(path: str) -> Dict[str,str]: + """Parses a model_evaluation path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/modelEvaluations/(?P<model_evaluation>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def table_spec_path(project: str,location: str,dataset: str,table_spec: str,) -> str: + """Returns a fully-qualified table_spec string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}".format(project=project, location=location, dataset=dataset, table_spec=table_spec, ) + + @staticmethod + def parse_table_spec_path(path: str) -> Dict[str,str]: + """Parses a table_spec path into its component segments.""" + m = 
re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/tableSpecs/(?P<table_spec>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse an organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. 
+ + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, AutoMlTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the auto ml client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, AutoMlTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AutoMlTransport): + # transport is a AutoMlTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def create_dataset(self, + request: Optional[Union[service.CreateDatasetRequest, dict]] = None, + *, + parent: Optional[str] = None, + dataset: Optional[gca_dataset.Dataset] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Creates a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_create_dataset(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + dataset = automl_v1beta1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1beta1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + response = client.create_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.CreateDatasetRequest, dict]): + The request object. Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. 
+ parent (str): + Required. The resource name of the + project to create the dataset for. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (google.cloud.automl_v1beta1.types.Dataset): + Required. The dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.CreateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.CreateDatasetRequest): + request = service.CreateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_dataset(self, + request: Optional[Union[service.GetDatasetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a dataset. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_get_dataset(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = client.get_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.GetDatasetRequest, dict]): + The request object. Request message for + [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. + name (str): + Required. The resource name of the + dataset to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetDatasetRequest): + request = service.GetDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_datasets(self, + request: Optional[Union[service.ListDatasetsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsPager: + r"""Lists datasets in a project. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_list_datasets(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.ListDatasetsRequest, dict]): + The request object. Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + parent (str): + Required. The resource name of the + project from which to list datasets. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsPager: + Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListDatasetsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListDatasetsRequest): + request = service.ListDatasetsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_datasets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDatasetsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_dataset(self, + request: Optional[Union[service.UpdateDatasetRequest, dict]] = None, + *, + dataset: Optional[gca_dataset.Dataset] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a dataset. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_update_dataset(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + dataset = automl_v1beta1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1beta1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = client.update_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.UpdateDatasetRequest, dict]): + The request object. Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] + dataset (google.cloud.automl_v1beta1.types.Dataset): + Required. The dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.UpdateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.UpdateDatasetRequest): + request = service.UpdateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_dataset(self, + request: Optional[Union[service.DeleteDatasetRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_delete_dataset(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.DeleteDatasetRequest, dict]): + The request object. Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. + name (str): + Required. The resource name of the + dataset to delete. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.DeleteDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.DeleteDatasetRequest): + request = service.DeleteDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.delete_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def import_data(self, + request: Optional[Union[service.ImportDataRequest, dict]] = None, + *, + name: Optional[str] = None, + input_config: Optional[io.InputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_import_data(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ImportDataRequest( + name="name_value", + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.ImportDataRequest, dict]): + The request object. Request message for + [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. + name (str): + Required. Dataset name. Dataset must + already exist. All imported annotations + and examples will be added. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (google.cloud.automl_v1beta1.types.InputConfig): + Required. The desired input location + and its domain specific semantics, if + any. + + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ImportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ImportDataRequest): + request = service.ImportDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def export_data(self, + request: Optional[Union[service.ExportDataRequest, dict]] = None, + *, + name: Optional[str] = None, + output_config: Optional[io.OutputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_export_data(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportDataRequest( + name="name_value", + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.ExportDataRequest, dict]): + The request object. Request message for + [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. + name (str): + Required. The resource name of the + dataset. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.automl_v1beta1.types.OutputConfig): + Required. The desired output + location. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ExportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ExportDataRequest): + request = service.ExportDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_annotation_spec(self, + request: Optional[Union[service.GetAnnotationSpecRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an annotation spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_get_annotation_spec(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_annotation_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest, dict]): + The request object. Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. + name (str): + Required. The resource name of the + annotation spec to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.AnnotationSpec: + A definition of an annotation spec. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetAnnotationSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetAnnotationSpecRequest): + request = service.GetAnnotationSpecRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_table_spec(self, + request: Optional[Union[service.GetTableSpecRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table_spec.TableSpec: + r"""Gets a table spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_get_table_spec(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetTableSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_table_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.GetTableSpecRequest, dict]): + The request object. Request message for + [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. + name (str): + Required. The resource name of the + table spec to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.TableSpec: + A specification of a relational table. + The table's schema is represented via its child + column specs. It is pre-populated as part of + ImportData by schema inference algorithm, the version + of which is a required parameter of ImportData + InputConfig. Note: While working with a table, at + times the schema may be inconsistent with the data in + the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a + model. Used by: \* Tables + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetTableSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetTableSpecRequest): + request = service.GetTableSpecRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_table_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_table_specs(self, + request: Optional[Union[service.ListTableSpecsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTableSpecsPager: + r"""Lists table specs in a dataset. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_list_table_specs(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListTableSpecsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_table_specs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.ListTableSpecsRequest, dict]): + The request object. Request message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + parent (str): + Required. The resource name of the + dataset to list table specs from. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsPager: + Response message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListTableSpecsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListTableSpecsRequest): + request = service.ListTableSpecsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_table_specs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTableSpecsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def update_table_spec(self, + request: Optional[Union[service.UpdateTableSpecRequest, dict]] = None, + *, + table_spec: Optional[gca_table_spec.TableSpec] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_table_spec.TableSpec: + r"""Updates a table spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_update_table_spec(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.UpdateTableSpecRequest( + ) + + # Make the request + response = client.update_table_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.UpdateTableSpecRequest, dict]): + The request object. Request message for + [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] + table_spec (google.cloud.automl_v1beta1.types.TableSpec): + Required. The table spec which + replaces the resource on the server. + + This corresponds to the ``table_spec`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.TableSpec: + A specification of a relational table. + The table's schema is represented via its child + column specs. It is pre-populated as part of + ImportData by schema inference algorithm, the version + of which is a required parameter of ImportData + InputConfig. Note: While working with a table, at + times the schema may be inconsistent with the data in + the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a + model. Used by: \* Tables + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_spec]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.UpdateTableSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.UpdateTableSpecRequest): + request = service.UpdateTableSpecRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_spec is not None: + request.table_spec = table_spec + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.update_table_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("table_spec.name", request.table_spec.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_column_spec(self, + request: Optional[Union[service.GetColumnSpecRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> column_spec.ColumnSpec: + r"""Gets a column spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_get_column_spec(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetColumnSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_column_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.GetColumnSpecRequest, dict]): + The request object. Request message for + [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. + name (str): + Required. The resource name of the + column spec to retrieve. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.ColumnSpec: + A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were + given on import . Used by: \* Tables + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetColumnSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetColumnSpecRequest): + request = service.GetColumnSpecRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get_column_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_column_specs(self, + request: Optional[Union[service.ListColumnSpecsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListColumnSpecsPager: + r"""Lists column specs in a table spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_list_column_specs(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListColumnSpecsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_column_specs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.ListColumnSpecsRequest, dict]): + The request object. Request message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + parent (str): + Required. The resource name of the + table spec to list column specs from. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsPager: + Response message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListColumnSpecsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListColumnSpecsRequest): + request = service.ListColumnSpecsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_column_specs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListColumnSpecsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_column_spec(self, + request: Optional[Union[service.UpdateColumnSpecRequest, dict]] = None, + *, + column_spec: Optional[gca_column_spec.ColumnSpec] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_column_spec.ColumnSpec: + r"""Updates a column spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_update_column_spec(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.UpdateColumnSpecRequest( + ) + + # Make the request + response = client.update_column_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest, dict]): + The request object. Request message for + [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] + column_spec (google.cloud.automl_v1beta1.types.ColumnSpec): + Required. The column spec which + replaces the resource on the server. + + This corresponds to the ``column_spec`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.ColumnSpec: + A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were + given on import . Used by: \* Tables + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([column_spec]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.UpdateColumnSpecRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.UpdateColumnSpecRequest): + request = service.UpdateColumnSpecRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if column_spec is not None: + request.column_spec = column_spec + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_column_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("column_spec.name", request.column_spec.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_model(self, + request: Optional[Union[service.CreateModelRequest, dict]] = None, + *, + parent: Optional[str] = None, + model: Optional[gca_model.Model] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_create_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.CreateModelRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.CreateModelRequest, dict]): + The request object. Request message for + [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. + parent (str): + Required. Resource name of the parent + project where the model is being + created. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (google.cloud.automl_v1beta1.types.Model): + Required. The model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.automl_v1beta1.types.Model` API + proto representing a trained machine learning model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.CreateModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.CreateModelRequest): + request = service.CreateModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gca_model.Model, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_model(self, + request: Optional[Union[service.GetModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a model. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_get_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.GetModelRequest, dict]): + The request object. Request message for + [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. + name (str): + Required. Resource name of the model. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.automl_v1beta1.types.Model: + API proto representing a trained + machine learning model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetModelRequest): + request = service.GetModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_models(self, + request: Optional[Union[service.ListModelsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsPager: + r"""Lists models. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_list_models(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.ListModelsRequest, dict]): + The request object. Request message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + parent (str): + Required. Resource name of the + project, from which to list the models. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsPager: + Response message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListModelsRequest): + request = service.ListModelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_models] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_model(self, + request: Optional[Union[service.DeleteModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_delete_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.DeleteModelRequest, dict]): + The request object. Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. + name (str): + Required. Resource name of the model + being deleted. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.DeleteModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.DeleteModelRequest): + request = service.DeleteModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def deploy_model(self, + request: Optional[Union[service.DeployModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deploys a model. If a model is already deployed, deploying it + with the same parameters has no effect. Deploying with different + parameters (e.g. changing + + [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number]) + will reset the deployment state without pausing the model's + availability. + + Only applicable for Text Classification, Image Object Detection, + Tables, and Image Segmentation; all other domains manage + deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_deploy_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.DeployModelRequest, dict]): + The request object. Request message for + [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. + name (str): + Required. Resource name of the model + to deploy. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.DeployModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.DeployModelRequest): + request = service.DeployModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.deploy_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. 
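+ # Note: the returned ``google.api_core.operation.Operation`` is a future; calling + # ``result()`` on it (as in the sample above) blocks until the deploy operation completes.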
+ return response + + def undeploy_model(self, + request: Optional[Union[service.UndeployModelRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Undeploys a model. If the model is not deployed this method has + no effect. + + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_undeploy_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.UndeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.UndeployModelRequest, dict]): + The request object. Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. + name (str): + Required. Resource name of the model + to undeploy. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.UndeployModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.UndeployModelRequest): + request = service.UndeployModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.undeploy_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def export_model(self, + request: Optional[Union[service.ExportModelRequest, dict]] = None, + *, + name: Optional[str] = None, + output_config: Optional[io.ModelExportOutputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Exports a trained, "export-able", model to a user specified + Google Cloud Storage location. A model is considered export-able + if and only if it has an export format defined for it in + + [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_export_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.ExportModelRequest, dict]): + The request object. Request message for + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an + error code will be returned. + name (str): + Required. The resource name of the + model to export. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.automl_v1beta1.types.ModelExportOutputConfig): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ExportModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ExportModelRequest): + request = service.ExportModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def export_evaluated_examples(self, + request: Optional[Union[service.ExportEvaluatedExamplesRequest, dict]] = None, + *, + name: Optional[str] = None, + output_config: Optional[io.ExportEvaluatedExamplesOutputConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Exports examples on which the model was evaluated (i.e. which + were in the TEST set of the dataset the model was created from), + together with their ground truth annotations and the annotations + created (predicted) by the model. The examples, ground truth and + predictions are exported in the state they were at the moment + the model was evaluated. + + This export is available only for 30 days since the model + evaluation is created. + + Currently only available for Tables. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_export_evaluated_examples(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportEvaluatedExamplesRequest( + name="name_value", + ) + + # Make the request + operation = client.export_evaluated_examples(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest, dict]): + The request object. Request message for + [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. + name (str): + Required. The resource name of the + model whose evaluated examples are to be + exported. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ExportEvaluatedExamplesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ExportEvaluatedExamplesRequest): + request = service.ExportEvaluatedExamplesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_evaluated_examples] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
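+ # Note: the routing header assembled above is sent as ``x-goog-request-params`` so the + # backend can route the call by resource name; caller-supplied metadata is forwarded unchanged.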
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def get_model_evaluation(self, + request: Optional[Union[service.GetModelEvaluationRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a model evaluation. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_get_model_evaluation(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.GetModelEvaluationRequest, dict]): + The request object. Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. + name (str): + Required. Resource name for the model + evaluation. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.ModelEvaluation: + Evaluation results of a model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.GetModelEvaluationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.GetModelEvaluationRequest): + request = service.GetModelEvaluationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_evaluations(self, + request: Optional[Union[service.ListModelEvaluationsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsPager: + r"""Lists model evaluations. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_list_model_evaluations(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest, dict]): + The request object. Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + parent (str): + Required. Resource name of the model + to list the model evaluations for. If + modelId is set as "-", this will list + model evaluations from across all models + of the parent location. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsPager: + Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a service.ListModelEvaluationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, service.ListModelEvaluationsRequest): + request = service.ListModelEvaluationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
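+ # Note: when ``retry``/``timeout`` are left at ``gapic_v1.method.DEFAULT``, the wrapped + # method applies the per-method defaults configured on the transport.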
+ rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelEvaluationsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "AutoMlClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + + + + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "AutoMlClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/pagers.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/pagers.py new file mode 100644 index 00000000..4d4d2676 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/pagers.py @@ -0,0 +1,628 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec + + +class ListDatasetsPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., service.ListDatasetsResponse], + request: service.ListDatasetsRequest, + response: service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1beta1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.automl_v1beta1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[dataset.Dataset]: + for page in self.pages: + yield from page.datasets + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDatasetsAsyncPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[service.ListDatasetsResponse]], + request: service.ListDatasetsRequest, + response: service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1beta1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.automl_v1beta1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[dataset.Dataset]: + async def async_generator(): + async for page in self.pages: + for response in page.datasets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTableSpecsPager: + """A pager for iterating through ``list_table_specs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``table_specs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTableSpecs`` requests and continue to iterate + through the ``table_specs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., service.ListTableSpecsResponse], + request: service.ListTableSpecsRequest, + response: service.ListTableSpecsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1beta1.types.ListTableSpecsRequest): + The initial request object. + response (google.cloud.automl_v1beta1.types.ListTableSpecsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListTableSpecsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListTableSpecsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[table_spec.TableSpec]: + for page in self.pages: + yield from page.table_specs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTableSpecsAsyncPager: + """A pager for iterating through ``list_table_specs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``table_specs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTableSpecs`` requests and continue to iterate + through the ``table_specs`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[service.ListTableSpecsResponse]], + request: service.ListTableSpecsRequest, + response: service.ListTableSpecsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1beta1.types.ListTableSpecsRequest): + The initial request object. + response (google.cloud.automl_v1beta1.types.ListTableSpecsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListTableSpecsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListTableSpecsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[table_spec.TableSpec]: + async def async_generator(): + async for page in self.pages: + for response in page.table_specs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListColumnSpecsPager: + """A pager for iterating through ``list_column_specs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``column_specs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListColumnSpecs`` requests and continue to iterate + through the ``column_specs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., service.ListColumnSpecsResponse], + request: service.ListColumnSpecsRequest, + response: service.ListColumnSpecsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1beta1.types.ListColumnSpecsRequest): + The initial request object. + response (google.cloud.automl_v1beta1.types.ListColumnSpecsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
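+
+        Editor's note: unknown attributes are forwarded to the most recent
+        response via ``__getattr__`` below, so, illustratively (with
+        ``client`` and ``table_spec_name`` as placeholders)::
+
+            pager = client.list_column_specs(parent=table_spec_name)
+            token = pager.next_page_token   # read from the latest response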
+ """ + self._method = method + self._request = service.ListColumnSpecsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListColumnSpecsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[column_spec.ColumnSpec]: + for page in self.pages: + yield from page.column_specs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListColumnSpecsAsyncPager: + """A pager for iterating through ``list_column_specs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``column_specs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListColumnSpecs`` requests and continue to iterate + through the ``column_specs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[service.ListColumnSpecsResponse]], + request: service.ListColumnSpecsRequest, + response: service.ListColumnSpecsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1beta1.types.ListColumnSpecsRequest): + The initial request object. + response (google.cloud.automl_v1beta1.types.ListColumnSpecsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListColumnSpecsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListColumnSpecsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[column_spec.ColumnSpec]: + async def async_generator(): + async for page in self.pages: + for response in page.column_specs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelsPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``model`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., service.ListModelsResponse], + request: service.ListModelsRequest, + response: service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1beta1.types.ListModelsRequest): + The initial request object. + response (google.cloud.automl_v1beta1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model.Model]: + for page in self.pages: + yield from page.model + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelsAsyncPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``model`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[service.ListModelsResponse]], + request: service.ListModelsRequest, + response: service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1beta1.types.ListModelsRequest): + The initial request object. + response (google.cloud.automl_v1beta1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.model: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluation`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluation`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., service.ListModelEvaluationsResponse], + request: service.ListModelEvaluationsRequest, + response: service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_evaluation.ModelEvaluation]: + for page in self.pages: + yield from page.model_evaluation + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsAsyncPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluation`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluation`` field on the + corresponding responses. + + All the usual :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[service.ListModelEvaluationsResponse]], + request: service.ListModelEvaluationsRequest, + response: service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model_evaluation.ModelEvaluation]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluation: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py new file mode 100644 index 00000000..9d86479d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import AutoMlTransport +from .grpc import AutoMlGrpcTransport +from .grpc_asyncio import AutoMlGrpcAsyncIOTransport +from .rest import AutoMlRestTransport +from .rest import AutoMlRestInterceptor + + +# Compile a registry of transports. 
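+# Editor's note: the registry keys ("grpc", "grpc_asyncio", "rest") are the
+# values accepted by the client's ``transport`` argument; lookup by key selects
+# the concrete transport class, so, illustratively,
+# ``AutoMlClient(transport="rest")`` resolves to ``AutoMlRestTransport``.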
+_transport_registry = OrderedDict() # type: Dict[str, Type[AutoMlTransport]] +_transport_registry['grpc'] = AutoMlGrpcTransport +_transport_registry['grpc_asyncio'] = AutoMlGrpcAsyncIOTransport +_transport_registry['rest'] = AutoMlRestTransport + +__all__ = ( + 'AutoMlTransport', + 'AutoMlGrpcTransport', + 'AutoMlGrpcAsyncIOTransport', + 'AutoMlRestTransport', + 'AutoMlRestInterceptor', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py new file mode 100644 index 00000000..2ed29d7c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py @@ -0,0 +1,570 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.automl_v1beta1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +class AutoMlTransport(abc.ABC): + """Abstract transport class for AutoMl.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'automl.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience(api_audience if api_audience else host) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
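+        # Editor's note: ``gapic_v1.method.wrap_method`` binds each RPC to its
+        # default timeout, an optional default retry policy (exponential
+        # backoff on DeadlineExceeded / ServiceUnavailable below) and the
+        # ``client_info`` used for the user-agent header; callers inherit these
+        # defaults unless they pass ``retry``/``timeout`` per call.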
+ self._wrapped_methods = { + self.create_dataset: gapic_v1.method.wrap_method( + self.create_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.get_dataset: gapic_v1.method.wrap_method( + self.get_dataset, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_datasets: gapic_v1.method.wrap_method( + self.list_datasets, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.update_dataset: gapic_v1.method.wrap_method( + self.update_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_dataset: gapic_v1.method.wrap_method( + self.delete_dataset, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.import_data: gapic_v1.method.wrap_method( + self.import_data, + default_timeout=5.0, + client_info=client_info, + ), + self.export_data: gapic_v1.method.wrap_method( + self.export_data, + default_timeout=5.0, + client_info=client_info, + ), + self.get_annotation_spec: gapic_v1.method.wrap_method( + self.get_annotation_spec, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.get_table_spec: gapic_v1.method.wrap_method( + self.get_table_spec, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_table_specs: gapic_v1.method.wrap_method( + self.list_table_specs, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.update_table_spec: gapic_v1.method.wrap_method( + self.update_table_spec, + default_timeout=5.0, + client_info=client_info, + ), + self.get_column_spec: gapic_v1.method.wrap_method( + self.get_column_spec, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_column_specs: gapic_v1.method.wrap_method( + self.list_column_specs, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.update_column_spec: gapic_v1.method.wrap_method( + self.update_column_spec, + default_timeout=5.0, + client_info=client_info, + ), + 
self.create_model: gapic_v1.method.wrap_method( + self.create_model, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model: gapic_v1.method.wrap_method( + self.get_model, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_models: gapic_v1.method.wrap_method( + self.list_models, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.delete_model: gapic_v1.method.wrap_method( + self.delete_model, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.deploy_model: gapic_v1.method.wrap_method( + self.deploy_model, + default_timeout=5.0, + client_info=client_info, + ), + self.undeploy_model: gapic_v1.method.wrap_method( + self.undeploy_model, + default_timeout=5.0, + client_info=client_info, + ), + self.export_model: gapic_v1.method.wrap_method( + self.export_model, + default_timeout=5.0, + client_info=client_info, + ), + self.export_evaluated_examples: gapic_v1.method.wrap_method( + self.export_evaluated_examples, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model_evaluation: gapic_v1.method.wrap_method( + self.get_model_evaluation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=5.0, + ), + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_evaluations: gapic_v1.method.wrap_method( + self.list_model_evaluations, + default_timeout=5.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
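+
+        Editor's note: the usual way to have ``close()`` called at the right
+        time is to use the service client as a context manager, e.g. (sketch
+        with a default, non-shared transport)::
+
+            with AutoMlClient() as client:
+                ...   # the transport is closed on leaving the block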
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_dataset(self) -> Callable[ + [service.CreateDatasetRequest], + Union[ + gca_dataset.Dataset, + Awaitable[gca_dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def get_dataset(self) -> Callable[ + [service.GetDatasetRequest], + Union[ + dataset.Dataset, + Awaitable[dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def list_datasets(self) -> Callable[ + [service.ListDatasetsRequest], + Union[ + service.ListDatasetsResponse, + Awaitable[service.ListDatasetsResponse] + ]]: + raise NotImplementedError() + + @property + def update_dataset(self) -> Callable[ + [service.UpdateDatasetRequest], + Union[ + gca_dataset.Dataset, + Awaitable[gca_dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def delete_dataset(self) -> Callable[ + [service.DeleteDatasetRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def import_data(self) -> Callable[ + [service.ImportDataRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_data(self) -> Callable[ + [service.ExportDataRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_annotation_spec(self) -> Callable[ + [service.GetAnnotationSpecRequest], + Union[ + annotation_spec.AnnotationSpec, + Awaitable[annotation_spec.AnnotationSpec] + ]]: + raise NotImplementedError() + + @property + def get_table_spec(self) -> Callable[ + [service.GetTableSpecRequest], + Union[ + table_spec.TableSpec, + Awaitable[table_spec.TableSpec] + ]]: + raise NotImplementedError() + + @property + def list_table_specs(self) -> Callable[ + [service.ListTableSpecsRequest], + Union[ + service.ListTableSpecsResponse, + Awaitable[service.ListTableSpecsResponse] + ]]: + raise NotImplementedError() + + @property + def update_table_spec(self) -> Callable[ + [service.UpdateTableSpecRequest], + Union[ + gca_table_spec.TableSpec, + Awaitable[gca_table_spec.TableSpec] + ]]: + raise NotImplementedError() + + @property + def get_column_spec(self) -> Callable[ + [service.GetColumnSpecRequest], + Union[ + column_spec.ColumnSpec, + Awaitable[column_spec.ColumnSpec] + ]]: + raise NotImplementedError() + + @property + def list_column_specs(self) -> Callable[ + [service.ListColumnSpecsRequest], + Union[ + service.ListColumnSpecsResponse, + Awaitable[service.ListColumnSpecsResponse] + ]]: + raise NotImplementedError() + + @property + def update_column_spec(self) -> Callable[ + [service.UpdateColumnSpecRequest], + Union[ + gca_column_spec.ColumnSpec, + Awaitable[gca_column_spec.ColumnSpec] + ]]: + raise NotImplementedError() + + @property + def create_model(self) -> Callable[ + [service.CreateModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_model(self) -> Callable[ + [service.GetModelRequest], + Union[ + model.Model, + Awaitable[model.Model] + ]]: + raise NotImplementedError() + + @property + def list_models(self) -> Callable[ + [service.ListModelsRequest], + Union[ + service.ListModelsResponse, + Awaitable[service.ListModelsResponse] + ]]: + raise NotImplementedError() + + @property + def 
delete_model(self) -> Callable[ + [service.DeleteModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def deploy_model(self) -> Callable[ + [service.DeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def undeploy_model(self) -> Callable[ + [service.UndeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_model(self) -> Callable[ + [service.ExportModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_evaluated_examples(self) -> Callable[ + [service.ExportEvaluatedExamplesRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_model_evaluation(self) -> Callable[ + [service.GetModelEvaluationRequest], + Union[ + model_evaluation.ModelEvaluation, + Awaitable[model_evaluation.ModelEvaluation] + ]]: + raise NotImplementedError() + + @property + def list_model_evaluations(self) -> Callable[ + [service.ListModelEvaluationsRequest], + Union[ + service.ListModelEvaluationsResponse, + Awaitable[service.ListModelEvaluationsResponse] + ]]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'AutoMlTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py new file mode 100644 index 00000000..f9aa5e51 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py @@ -0,0 +1,971 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.longrunning import operations_pb2 # type: ignore +from .base import AutoMlTransport, DEFAULT_CLIENT_INFO + + +class AutoMlGrpcTransport(AutoMlTransport): + """gRPC backend transport for AutoMl. + + AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. + For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. 
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. 
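+        # Editor's note: the OperationsClient created below shares this
+        # transport's gRPC channel; it is what the AutoMl client uses to poll
+        # the google.longrunning operations returned by methods such as
+        # delete_dataset, import_data and create_model.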
+ if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [service.CreateDatasetRequest], + gca_dataset.Dataset]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/CreateDataset', + request_serializer=service.CreateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [service.GetDatasetRequest], + dataset.Dataset]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a dataset. + + Returns: + Callable[[~.GetDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetDataset', + request_serializer=service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def list_datasets(self) -> Callable[ + [service.ListDatasetsRequest], + service.ListDatasetsResponse]: + r"""Return a callable for the list datasets method over gRPC. + + Lists datasets in a project. + + Returns: + Callable[[~.ListDatasetsRequest], + ~.ListDatasetsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ListDatasets', + request_serializer=service.ListDatasetsRequest.serialize, + response_deserializer=service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def update_dataset(self) -> Callable[ + [service.UpdateDatasetRequest], + gca_dataset.Dataset]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/UpdateDataset', + request_serializer=service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def delete_dataset(self) -> Callable[ + [service.DeleteDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/DeleteDataset', + request_serializer=service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [service.ImportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Returns: + Callable[[~.ImportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ImportData', + request_serializer=service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [service.ExportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the export data method over gRPC. + + Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ExportData', + request_serializer=service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def get_annotation_spec(self) -> Callable[ + [service.GetAnnotationSpecRequest], + annotation_spec.AnnotationSpec]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an annotation spec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + ~.AnnotationSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_annotation_spec' not in self._stubs: + self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetAnnotationSpec', + request_serializer=service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs['get_annotation_spec'] + + @property + def get_table_spec(self) -> Callable[ + [service.GetTableSpecRequest], + table_spec.TableSpec]: + r"""Return a callable for the get table spec method over gRPC. + + Gets a table spec. + + Returns: + Callable[[~.GetTableSpecRequest], + ~.TableSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_table_spec' not in self._stubs: + self._stubs['get_table_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetTableSpec', + request_serializer=service.GetTableSpecRequest.serialize, + response_deserializer=table_spec.TableSpec.deserialize, + ) + return self._stubs['get_table_spec'] + + @property + def list_table_specs(self) -> Callable[ + [service.ListTableSpecsRequest], + service.ListTableSpecsResponse]: + r"""Return a callable for the list table specs method over gRPC. + + Lists table specs in a dataset. + + Returns: + Callable[[~.ListTableSpecsRequest], + ~.ListTableSpecsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_table_specs' not in self._stubs: + self._stubs['list_table_specs'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ListTableSpecs', + request_serializer=service.ListTableSpecsRequest.serialize, + response_deserializer=service.ListTableSpecsResponse.deserialize, + ) + return self._stubs['list_table_specs'] + + @property + def update_table_spec(self) -> Callable[ + [service.UpdateTableSpecRequest], + gca_table_spec.TableSpec]: + r"""Return a callable for the update table spec method over gRPC. + + Updates a table spec. + + Returns: + Callable[[~.UpdateTableSpecRequest], + ~.TableSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_table_spec' not in self._stubs: + self._stubs['update_table_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/UpdateTableSpec', + request_serializer=service.UpdateTableSpecRequest.serialize, + response_deserializer=gca_table_spec.TableSpec.deserialize, + ) + return self._stubs['update_table_spec'] + + @property + def get_column_spec(self) -> Callable[ + [service.GetColumnSpecRequest], + column_spec.ColumnSpec]: + r"""Return a callable for the get column spec method over gRPC. + + Gets a column spec. + + Returns: + Callable[[~.GetColumnSpecRequest], + ~.ColumnSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_column_spec' not in self._stubs: + self._stubs['get_column_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetColumnSpec', + request_serializer=service.GetColumnSpecRequest.serialize, + response_deserializer=column_spec.ColumnSpec.deserialize, + ) + return self._stubs['get_column_spec'] + + @property + def list_column_specs(self) -> Callable[ + [service.ListColumnSpecsRequest], + service.ListColumnSpecsResponse]: + r"""Return a callable for the list column specs method over gRPC. + + Lists column specs in a table spec. + + Returns: + Callable[[~.ListColumnSpecsRequest], + ~.ListColumnSpecsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_column_specs' not in self._stubs: + self._stubs['list_column_specs'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ListColumnSpecs', + request_serializer=service.ListColumnSpecsRequest.serialize, + response_deserializer=service.ListColumnSpecsResponse.deserialize, + ) + return self._stubs['list_column_specs'] + + @property + def update_column_spec(self) -> Callable[ + [service.UpdateColumnSpecRequest], + gca_column_spec.ColumnSpec]: + r"""Return a callable for the update column spec method over gRPC. + + Updates a column spec. + + Returns: + Callable[[~.UpdateColumnSpecRequest], + ~.ColumnSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_column_spec' not in self._stubs: + self._stubs['update_column_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/UpdateColumnSpec', + request_serializer=service.UpdateColumnSpecRequest.serialize, + response_deserializer=gca_column_spec.ColumnSpec.deserialize, + ) + return self._stubs['update_column_spec'] + + @property + def create_model(self) -> Callable[ + [service.CreateModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the create model method over gRPC. + + Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. 
When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Returns: + Callable[[~.CreateModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_model' not in self._stubs: + self._stubs['create_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/CreateModel', + request_serializer=service.CreateModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_model'] + + @property + def get_model(self) -> Callable[ + [service.GetModelRequest], + model.Model]: + r"""Return a callable for the get model method over gRPC. + + Gets a model. + + Returns: + Callable[[~.GetModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetModel', + request_serializer=service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [service.ListModelsRequest], + service.ListModelsResponse]: + r"""Return a callable for the list models method over gRPC. + + Lists models. + + Returns: + Callable[[~.ListModelsRequest], + ~.ListModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ListModels', + request_serializer=service.ListModelsRequest.serialize, + response_deserializer=service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def delete_model(self) -> Callable[ + [service.DeleteModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a model. Returns ``google.protobuf.Empty`` in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
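+        # Editorial sketch (not generated code): ``transport`` below stands for
+        # an instance of this class. The callable returned by this property is
+        # invoked with a request message and returns the raw long-running
+        # operation, for example:
+        #
+        #     op = transport.delete_model(
+        #         service.DeleteModelRequest(name="projects/.../models/..."))
+        #
+        # ``op`` is an ``operations_pb2.Operation``; resolving it to its final
+        # response is typically handled through an ``operations_v1`` client.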
+        if 'delete_model' not in self._stubs:
+            self._stubs['delete_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1beta1.AutoMl/DeleteModel',
+                request_serializer=service.DeleteModelRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_model']
+
+    @property
+    def deploy_model(self) -> Callable[
+            [service.DeployModelRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the deploy model method over gRPC.
+
+        Deploys a model. If a model is already deployed, deploying it
+        with the same parameters has no effect. Deploying with different
+        parameters (for example, changing
+
+        [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
+        will reset the deployment state without pausing the model's
+        availability.
+
+        Only applicable for Text Classification, Image Object Detection,
+        Tables, and Image Segmentation; all other domains manage
+        deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.DeployModelRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'deploy_model' not in self._stubs:
+            self._stubs['deploy_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1beta1.AutoMl/DeployModel',
+                request_serializer=service.DeployModelRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['deploy_model']
+
+    @property
+    def undeploy_model(self) -> Callable[
+            [service.UndeployModelRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the undeploy model method over gRPC.
+
+        Undeploys a model. If the model is not deployed, this method has
+        no effect.
+
+        Only applicable for Text Classification, Image Object Detection
+        and Tables; all other domains manage deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.UndeployModelRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'undeploy_model' not in self._stubs:
+            self._stubs['undeploy_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1beta1.AutoMl/UndeployModel',
+                request_serializer=service.UndeployModelRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['undeploy_model']
+
+    @property
+    def export_model(self) -> Callable[
+            [service.ExportModelRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the export model method over gRPC.
+
+        Exports a trained, "export-able" model to a user-specified
+        Google Cloud Storage location. A model is considered export-able
+        if and only if it has an export format defined for it in
+
+        [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+ + Returns: + Callable[[~.ExportModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ExportModel', + request_serializer=service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def export_evaluated_examples(self) -> Callable[ + [service.ExportEvaluatedExamplesRequest], + operations_pb2.Operation]: + r"""Return a callable for the export evaluated examples method over gRPC. + + Exports examples on which the model was evaluated (i.e. which + were in the TEST set of the dataset the model was created from), + together with their ground truth annotations and the annotations + created (predicted) by the model. The examples, ground truth and + predictions are exported in the state they were at the moment + the model was evaluated. + + This export is available only for 30 days since the model + evaluation is created. + + Currently only available for Tables. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportEvaluatedExamplesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_evaluated_examples' not in self._stubs: + self._stubs['export_evaluated_examples'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples', + request_serializer=service.ExportEvaluatedExamplesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_evaluated_examples'] + + @property + def get_model_evaluation(self) -> Callable[ + [service.GetModelEvaluationRequest], + model_evaluation.ModelEvaluation]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a model evaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetModelEvaluation', + request_serializer=service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [service.ListModelEvaluationsRequest], + service.ListModelEvaluationsResponse]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists model evaluations. 
+ + Returns: + Callable[[~.ListModelEvaluationsRequest], + ~.ListModelEvaluationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ListModelEvaluations', + request_serializer=service.ListModelEvaluationsRequest.serialize, + response_deserializer=service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'AutoMlGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py new file mode 100644 index 00000000..bfb03112 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py @@ -0,0 +1,970 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.longrunning import operations_pb2 # type: ignore +from .base import AutoMlTransport, DEFAULT_CLIENT_INFO +from .grpc import AutoMlGrpcTransport + + +class AutoMlGrpcAsyncIOTransport(AutoMlTransport): + """gRPC AsyncIO backend transport for AutoMl. + + AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. 
+    For example, for
+    ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,
+    the ID of the item is ``{dataset_id}``.
+
+    Currently the only supported ``location_id`` is "us-central1".
+
+    On any input that is documented to expect a string parameter in
+    snake_case or kebab-case, either of those cases is accepted.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'automl.googleapis.com',
+                       credentials: Optional[ga_credentials.Credentials] = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'automl.googleapis.com',
+            credentials: Optional[ga_credentials.Credentials] = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: Optional[aio.Channel] = None,
+            api_mtls_endpoint: Optional[str] = None,
+            client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+            ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            api_audience: Optional[str] = None,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [service.CreateDatasetRequest], + Awaitable[gca_dataset.Dataset]]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/CreateDataset', + request_serializer=service.CreateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [service.GetDatasetRequest], + Awaitable[dataset.Dataset]]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a dataset. + + Returns: + Callable[[~.GetDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetDataset', + request_serializer=service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def list_datasets(self) -> Callable[ + [service.ListDatasetsRequest], + Awaitable[service.ListDatasetsResponse]]: + r"""Return a callable for the list datasets method over gRPC. + + Lists datasets in a project. + + Returns: + Callable[[~.ListDatasetsRequest], + Awaitable[~.ListDatasetsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ListDatasets', + request_serializer=service.ListDatasetsRequest.serialize, + response_deserializer=service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def update_dataset(self) -> Callable[ + [service.UpdateDatasetRequest], + Awaitable[gca_dataset.Dataset]]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/UpdateDataset', + request_serializer=service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def delete_dataset(self) -> Callable[ + [service.DeleteDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a dataset and all of its contents. Returns empty + response in the + [response][google.longrunning.Operation.response] field when it + completes, and ``delete_details`` in the + [metadata][google.longrunning.Operation.metadata] field. + + Returns: + Callable[[~.DeleteDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/DeleteDataset', + request_serializer=service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [service.ImportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a dataset. 
For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A + [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] + parameter must be explicitly set. Returns an empty response + in the [response][google.longrunning.Operation.response] + field when it completes. + + Returns: + Callable[[~.ImportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ImportData', + request_serializer=service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [service.ExportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export data method over gRPC. + + Exports dataset's data to the provided output location. Returns + an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ExportData', + request_serializer=service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def get_annotation_spec(self) -> Callable[ + [service.GetAnnotationSpecRequest], + Awaitable[annotation_spec.AnnotationSpec]]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an annotation spec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + Awaitable[~.AnnotationSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_annotation_spec' not in self._stubs: + self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetAnnotationSpec', + request_serializer=service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs['get_annotation_spec'] + + @property + def get_table_spec(self) -> Callable[ + [service.GetTableSpecRequest], + Awaitable[table_spec.TableSpec]]: + r"""Return a callable for the get table spec method over gRPC. + + Gets a table spec. + + Returns: + Callable[[~.GetTableSpecRequest], + Awaitable[~.TableSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
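+        # Editorial sketch (not generated code): on this AsyncIO transport the
+        # returned callable must be awaited, e.g., with ``transport`` standing
+        # for an instance of this class:
+        #
+        #     spec = await transport.get_table_spec(
+        #         service.GetTableSpecRequest(name="projects/.../tableSpecs/..."))
+        #
+        # whereas the synchronous gRPC transport returns the ``TableSpec``
+        # message directly.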
+ if 'get_table_spec' not in self._stubs: + self._stubs['get_table_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetTableSpec', + request_serializer=service.GetTableSpecRequest.serialize, + response_deserializer=table_spec.TableSpec.deserialize, + ) + return self._stubs['get_table_spec'] + + @property + def list_table_specs(self) -> Callable[ + [service.ListTableSpecsRequest], + Awaitable[service.ListTableSpecsResponse]]: + r"""Return a callable for the list table specs method over gRPC. + + Lists table specs in a dataset. + + Returns: + Callable[[~.ListTableSpecsRequest], + Awaitable[~.ListTableSpecsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_table_specs' not in self._stubs: + self._stubs['list_table_specs'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ListTableSpecs', + request_serializer=service.ListTableSpecsRequest.serialize, + response_deserializer=service.ListTableSpecsResponse.deserialize, + ) + return self._stubs['list_table_specs'] + + @property + def update_table_spec(self) -> Callable[ + [service.UpdateTableSpecRequest], + Awaitable[gca_table_spec.TableSpec]]: + r"""Return a callable for the update table spec method over gRPC. + + Updates a table spec. + + Returns: + Callable[[~.UpdateTableSpecRequest], + Awaitable[~.TableSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_table_spec' not in self._stubs: + self._stubs['update_table_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/UpdateTableSpec', + request_serializer=service.UpdateTableSpecRequest.serialize, + response_deserializer=gca_table_spec.TableSpec.deserialize, + ) + return self._stubs['update_table_spec'] + + @property + def get_column_spec(self) -> Callable[ + [service.GetColumnSpecRequest], + Awaitable[column_spec.ColumnSpec]]: + r"""Return a callable for the get column spec method over gRPC. + + Gets a column spec. + + Returns: + Callable[[~.GetColumnSpecRequest], + Awaitable[~.ColumnSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_column_spec' not in self._stubs: + self._stubs['get_column_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetColumnSpec', + request_serializer=service.GetColumnSpecRequest.serialize, + response_deserializer=column_spec.ColumnSpec.deserialize, + ) + return self._stubs['get_column_spec'] + + @property + def list_column_specs(self) -> Callable[ + [service.ListColumnSpecsRequest], + Awaitable[service.ListColumnSpecsResponse]]: + r"""Return a callable for the list column specs method over gRPC. + + Lists column specs in a table spec. + + Returns: + Callable[[~.ListColumnSpecsRequest], + Awaitable[~.ListColumnSpecsResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_column_specs' not in self._stubs: + self._stubs['list_column_specs'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ListColumnSpecs', + request_serializer=service.ListColumnSpecsRequest.serialize, + response_deserializer=service.ListColumnSpecsResponse.deserialize, + ) + return self._stubs['list_column_specs'] + + @property + def update_column_spec(self) -> Callable[ + [service.UpdateColumnSpecRequest], + Awaitable[gca_column_spec.ColumnSpec]]: + r"""Return a callable for the update column spec method over gRPC. + + Updates a column spec. + + Returns: + Callable[[~.UpdateColumnSpecRequest], + Awaitable[~.ColumnSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_column_spec' not in self._stubs: + self._stubs['update_column_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/UpdateColumnSpec', + request_serializer=service.UpdateColumnSpecRequest.serialize, + response_deserializer=gca_column_spec.ColumnSpec.deserialize, + ) + return self._stubs['update_column_spec'] + + @property + def create_model(self) -> Callable[ + [service.CreateModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create model method over gRPC. + + Creates a model. Returns a Model in the + [response][google.longrunning.Operation.response] field when it + completes. When you create a model, several model evaluations + are created for it: a global evaluation, and one evaluation for + each annotation spec. + + Returns: + Callable[[~.CreateModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_model' not in self._stubs: + self._stubs['create_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/CreateModel', + request_serializer=service.CreateModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_model'] + + @property + def get_model(self) -> Callable[ + [service.GetModelRequest], + Awaitable[model.Model]]: + r"""Return a callable for the get model method over gRPC. + + Gets a model. + + Returns: + Callable[[~.GetModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'get_model' not in self._stubs:
+            self._stubs['get_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1beta1.AutoMl/GetModel',
+                request_serializer=service.GetModelRequest.serialize,
+                response_deserializer=model.Model.deserialize,
+            )
+        return self._stubs['get_model']
+
+    @property
+    def list_models(self) -> Callable[
+            [service.ListModelsRequest],
+            Awaitable[service.ListModelsResponse]]:
+        r"""Return a callable for the list models method over gRPC.
+
+        Lists models.
+
+        Returns:
+            Callable[[~.ListModelsRequest],
+                    Awaitable[~.ListModelsResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_models' not in self._stubs:
+            self._stubs['list_models'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1beta1.AutoMl/ListModels',
+                request_serializer=service.ListModelsRequest.serialize,
+                response_deserializer=service.ListModelsResponse.deserialize,
+            )
+        return self._stubs['list_models']
+
+    @property
+    def delete_model(self) -> Callable[
+            [service.DeleteModelRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the delete model method over gRPC.
+
+        Deletes a model. Returns ``google.protobuf.Empty`` in the
+        [response][google.longrunning.Operation.response] field when it
+        completes, and ``delete_details`` in the
+        [metadata][google.longrunning.Operation.metadata] field.
+
+        Returns:
+            Callable[[~.DeleteModelRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_model' not in self._stubs:
+            self._stubs['delete_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.automl.v1beta1.AutoMl/DeleteModel',
+                request_serializer=service.DeleteModelRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_model']
+
+    @property
+    def deploy_model(self) -> Callable[
+            [service.DeployModelRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the deploy model method over gRPC.
+
+        Deploys a model. If a model is already deployed, deploying it
+        with the same parameters has no effect. Deploying with different
+        parameters (for example, changing
+
+        [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
+        will reset the deployment state without pausing the model's
+        availability.
+
+        Only applicable for Text Classification, Image Object Detection,
+        Tables, and Image Segmentation; all other domains manage
+        deployment automatically.
+
+        Returns an empty response in the
+        [response][google.longrunning.Operation.response] field when it
+        completes.
+
+        Returns:
+            Callable[[~.DeployModelRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'deploy_model' not in self._stubs: + self._stubs['deploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/DeployModel', + request_serializer=service.DeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_model'] + + @property + def undeploy_model(self) -> Callable[ + [service.UndeployModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a model. If the model is not deployed this method has + no effect. + + Only applicable for Text Classification, Image Object Detection + and Tables; all other domains manage deployment automatically. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.UndeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undeploy_model' not in self._stubs: + self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/UndeployModel', + request_serializer=service.UndeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_model'] + + @property + def export_model(self) -> Callable[ + [service.ExportModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, "export-able", model to a user specified + Google Cloud Storage location. A model is considered export-able + if and only if it has an export format defined for it in + + [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. + + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ExportModel', + request_serializer=service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def export_evaluated_examples(self) -> Callable[ + [service.ExportEvaluatedExamplesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export evaluated examples method over gRPC. + + Exports examples on which the model was evaluated (i.e. which + were in the TEST set of the dataset the model was created from), + together with their ground truth annotations and the annotations + created (predicted) by the model. The examples, ground truth and + predictions are exported in the state they were at the moment + the model was evaluated. + + This export is available only for 30 days since the model + evaluation is created. + + Currently only available for Tables. 
+ + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it + completes. + + Returns: + Callable[[~.ExportEvaluatedExamplesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_evaluated_examples' not in self._stubs: + self._stubs['export_evaluated_examples'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples', + request_serializer=service.ExportEvaluatedExamplesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_evaluated_examples'] + + @property + def get_model_evaluation(self) -> Callable[ + [service.GetModelEvaluationRequest], + Awaitable[model_evaluation.ModelEvaluation]]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a model evaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/GetModelEvaluation', + request_serializer=service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [service.ListModelEvaluationsRequest], + Awaitable[service.ListModelEvaluationsResponse]]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists model evaluations. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + Awaitable[~.ListModelEvaluationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.AutoMl/ListModelEvaluations', + request_serializer=service.ListModelEvaluationsRequest.serialize, + response_deserializer=service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ( + 'AutoMlGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py new file mode 100644 index 00000000..c1876de4 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py @@ -0,0 +1,3091 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.longrunning import operations_pb2 # type: ignore + +from .base import AutoMlTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class AutoMlRestInterceptor: + """Interceptor for AutoMl. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the AutoMlRestTransport. + + .. 
code-block:: python + class MyCustomAutoMlInterceptor(AutoMlRestInterceptor): + def pre_create_dataset(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_dataset(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_dataset(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_dataset(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_deploy_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_deploy_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_export_data(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_export_data(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_export_evaluated_examples(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_export_evaluated_examples(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_export_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_export_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_annotation_spec(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_annotation_spec(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_column_spec(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_column_spec(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_dataset(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_dataset(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_model_evaluation(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_model_evaluation(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_table_spec(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_table_spec(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_import_data(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def 
post_import_data(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_column_specs(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_column_specs(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_datasets(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_datasets(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_model_evaluations(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_model_evaluations(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_models(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_models(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_table_specs(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_table_specs(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_undeploy_model(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_undeploy_model(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_column_spec(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_column_spec(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_dataset(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_dataset(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_table_spec(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_table_spec(self, response): + logging.log(f"Received response: {response}") + return response + + transport = AutoMlRestTransport(interceptor=MyCustomAutoMlInterceptor()) + client = AutoMlClient(transport=transport) + + + """ + def pre_create_dataset(self, request: service.CreateDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.CreateDatasetRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_dataset + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_create_dataset(self, response: gca_dataset.Dataset) -> gca_dataset.Dataset: + """Post-rpc interceptor for create_dataset + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_create_model(self, request: service.CreateModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.CreateModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. 
+ """ + return request, metadata + + def post_create_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for create_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_delete_dataset(self, request: service.DeleteDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeleteDatasetRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_dataset + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_delete_dataset(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_dataset + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_delete_model(self, request: service.DeleteModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeleteModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_delete_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_deploy_model(self, request: service.DeployModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeployModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for deploy_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_deploy_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for deploy_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_export_data(self, request: service.ExportDataRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ExportDataRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for export_data + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_export_data(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for export_data + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_export_evaluated_examples(self, request: service.ExportEvaluatedExamplesRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ExportEvaluatedExamplesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for export_evaluated_examples + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. 
+ """ + return request, metadata + + def post_export_evaluated_examples(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for export_evaluated_examples + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_export_model(self, request: service.ExportModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ExportModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for export_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_export_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for export_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_get_annotation_spec(self, request: service.GetAnnotationSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetAnnotationSpecRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_annotation_spec + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_get_annotation_spec(self, response: annotation_spec.AnnotationSpec) -> annotation_spec.AnnotationSpec: + """Post-rpc interceptor for get_annotation_spec + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_get_column_spec(self, request: service.GetColumnSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetColumnSpecRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_column_spec + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_get_column_spec(self, response: column_spec.ColumnSpec) -> column_spec.ColumnSpec: + """Post-rpc interceptor for get_column_spec + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_get_dataset(self, request: service.GetDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetDatasetRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_dataset + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_get_dataset(self, response: dataset.Dataset) -> dataset.Dataset: + """Post-rpc interceptor for get_dataset + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_get_model(self, request: service.GetModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. 
+ """ + return request, metadata + + def post_get_model(self, response: model.Model) -> model.Model: + """Post-rpc interceptor for get_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_get_model_evaluation(self, request: service.GetModelEvaluationRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetModelEvaluationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_model_evaluation + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_get_model_evaluation(self, response: model_evaluation.ModelEvaluation) -> model_evaluation.ModelEvaluation: + """Post-rpc interceptor for get_model_evaluation + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_get_table_spec(self, request: service.GetTableSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetTableSpecRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_table_spec + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_get_table_spec(self, response: table_spec.TableSpec) -> table_spec.TableSpec: + """Post-rpc interceptor for get_table_spec + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_import_data(self, request: service.ImportDataRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ImportDataRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for import_data + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_import_data(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for import_data + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_list_column_specs(self, request: service.ListColumnSpecsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListColumnSpecsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_column_specs + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_list_column_specs(self, response: service.ListColumnSpecsResponse) -> service.ListColumnSpecsResponse: + """Post-rpc interceptor for list_column_specs + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_list_datasets(self, request: service.ListDatasetsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListDatasetsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_datasets + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. 
+ """ + return request, metadata + + def post_list_datasets(self, response: service.ListDatasetsResponse) -> service.ListDatasetsResponse: + """Post-rpc interceptor for list_datasets + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_list_model_evaluations(self, request: service.ListModelEvaluationsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListModelEvaluationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_model_evaluations + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_list_model_evaluations(self, response: service.ListModelEvaluationsResponse) -> service.ListModelEvaluationsResponse: + """Post-rpc interceptor for list_model_evaluations + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_list_models(self, request: service.ListModelsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListModelsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_models + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_list_models(self, response: service.ListModelsResponse) -> service.ListModelsResponse: + """Post-rpc interceptor for list_models + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_list_table_specs(self, request: service.ListTableSpecsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListTableSpecsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_table_specs + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_list_table_specs(self, response: service.ListTableSpecsResponse) -> service.ListTableSpecsResponse: + """Post-rpc interceptor for list_table_specs + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_undeploy_model(self, request: service.UndeployModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UndeployModelRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for undeploy_model + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_undeploy_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for undeploy_model + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_update_column_spec(self, request: service.UpdateColumnSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UpdateColumnSpecRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_column_spec + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. 
+ """ + return request, metadata + + def post_update_column_spec(self, response: gca_column_spec.ColumnSpec) -> gca_column_spec.ColumnSpec: + """Post-rpc interceptor for update_column_spec + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_update_dataset(self, request: service.UpdateDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UpdateDatasetRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_dataset + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_update_dataset(self, response: gca_dataset.Dataset) -> gca_dataset.Dataset: + """Post-rpc interceptor for update_dataset + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + def pre_update_table_spec(self, request: service.UpdateTableSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UpdateTableSpecRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_table_spec + + Override in a subclass to manipulate the request or metadata + before they are sent to the AutoMl server. + """ + return request, metadata + + def post_update_table_spec(self, response: gca_table_spec.TableSpec) -> gca_table_spec.TableSpec: + """Post-rpc interceptor for update_table_spec + + Override in a subclass to manipulate the response + after it is returned by the AutoMl server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class AutoMlRestStub: + _session: AuthorizedSession + _host: str + _interceptor: AutoMlRestInterceptor + + +class AutoMlRestTransport(AutoMlTransport): + """REST backend transport for AutoMl. + + AutoML Server API. + + The resource names are assigned by the server. The server never + reuses names that it has created after the resources with those + names are deleted. + + An ID of a resource is the last element of the item's resource name. + For + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, + then the id for the item is ``{dataset_id}``. + + Currently the only supported ``location_id`` is "us-central1". + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__(self, *, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[ + ], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = 'https', + interceptor: Optional[AutoMlRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self-signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or AutoMlRestInterceptor() + self._prep_wrapped_messages(client_info) + + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. 
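+        # The http_options mapping below pins each google.longrunning +        # Operations RPC (get, list, cancel, delete, wait) to its v1beta1 REST +        # path, so this transport can poll and manage the long-running +        # operations returned by methods such as create_model and import_data; +        # the resulting AbstractOperationsClient is cached on the instance and +        # reused on later accesses of this property.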
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + 'google.longrunning.Operations.CancelOperation': [ + { + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}:cancel', + 'body': '*', + }, + ], + 'google.longrunning.Operations.DeleteOperation': [ + { + 'method': 'delete', + 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}', + }, + ], + 'google.longrunning.Operations.GetOperation': [ + { + 'method': 'get', + 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}', + }, + ], + 'google.longrunning.Operations.ListOperations': [ + { + 'method': 'get', + 'uri': '/v1beta1/{name=projects/*/locations/*}/operations', + }, + ], + 'google.longrunning.Operations.WaitOperation': [ + { + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}:wait', + 'body': '*', + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1beta1") + + self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) + + # Return the client from cache. + return self._operations_client + + class _CreateDataset(AutoMlRestStub): + def __hash__(self): + return hash("CreateDataset") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.CreateDatasetRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> gca_dataset.Dataset: + r"""Call the create dataset method over HTTP. + + Args: + request (~.service.CreateDatasetRequest): + The request object. Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1beta1/{parent=projects/*/locations/*}/datasets', + 'body': 'dataset', + }, + ] + request, metadata = self._interceptor.pre_create_dataset(request, metadata) + pb_request = service.CreateDatasetRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_dataset.Dataset() + pb_resp = gca_dataset.Dataset.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_dataset(resp) + return resp + + class _CreateModel(AutoMlRestStub): + def __hash__(self): + return hash("CreateModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.CreateModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the create model method over HTTP. + + Args: + request (~.service.CreateModelRequest): + The request object. Request message for + [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1beta1/{parent=projects/*/locations/*}/models', + 'body': 'model', + }, + ] + request, metadata = self._interceptor.pre_create_model(request, metadata) + pb_request = service.CreateModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_model(resp) + return resp + + class _DeleteDataset(AutoMlRestStub): + def __hash__(self): + return hash("DeleteDataset") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.DeleteDatasetRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the delete dataset method over HTTP. + + Args: + request (~.service.DeleteDatasetRequest): + The request object. Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_dataset(request, metadata) + pb_request = service.DeleteDatasetRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_dataset(resp) + return resp + + class _DeleteModel(AutoMlRestStub): + def __hash__(self): + return hash("DeleteModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.DeleteModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the delete model method over HTTP. + + Args: + request (~.service.DeleteModelRequest): + The request object. Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_model(request, metadata) + pb_request = service.DeleteModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_model(resp) + return resp + + class _DeployModel(AutoMlRestStub): + def __hash__(self): + return hash("DeployModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.DeployModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the deploy model method over HTTP. + + Args: + request (~.service.DeployModelRequest): + The request object. Request message for + [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:deploy', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_deploy_model(request, metadata) + pb_request = service.DeployModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_deploy_model(resp) + return resp + + class _ExportData(AutoMlRestStub): + def __hash__(self): + return hash("ExportData") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ExportDataRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the export data method over HTTP. + + Args: + request (~.service.ExportDataRequest): + The request object. Request message for + [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*}:exportData', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_export_data(request, metadata) + pb_request = service.ExportDataRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_export_data(resp) + return resp + + class _ExportEvaluatedExamples(AutoMlRestStub): + def __hash__(self): + return hash("ExportEvaluatedExamples") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ExportEvaluatedExamplesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the export evaluated examples method over HTTP. + + Args: + request (~.service.ExportEvaluatedExamplesRequest): + The request object. Request message for + [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_export_evaluated_examples(request, metadata) + pb_request = service.ExportEvaluatedExamplesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_export_evaluated_examples(resp) + return resp + + class _ExportModel(AutoMlRestStub): + def __hash__(self): + return hash("ExportModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ExportModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the export model method over HTTP. + + Args: + request (~.service.ExportModelRequest): + The request object. Request message for + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an + error code will be returned. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:export', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_export_model(request, metadata) + pb_request = service.ExportModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_export_model(resp) + return resp + + class _GetAnnotationSpec(AutoMlRestStub): + def __hash__(self): + return hash("GetAnnotationSpec") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.GetAnnotationSpecRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> annotation_spec.AnnotationSpec: + r"""Call the get annotation spec method over HTTP. + + Args: + request (~.service.GetAnnotationSpecRequest): + The request object. Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.annotation_spec.AnnotationSpec: + A definition of an annotation spec. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}', + }, + ] + request, metadata = self._interceptor.pre_get_annotation_spec(request, metadata) + pb_request = service.GetAnnotationSpecRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = annotation_spec.AnnotationSpec() + pb_resp = annotation_spec.AnnotationSpec.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_annotation_spec(resp) + return resp + + class _GetColumnSpec(AutoMlRestStub): + def __hash__(self): + return hash("GetColumnSpec") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.GetColumnSpecRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> column_spec.ColumnSpec: + r"""Call the get column spec method over HTTP. + + Args: + request (~.service.GetColumnSpecRequest): + The request object. Request message for + [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.column_spec.ColumnSpec: + A representation of a column in a relational table. When + listing them, column specs are returned in the same + order in which they were given on import . 
Used by: + + - Tables + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}', + }, + ] + request, metadata = self._interceptor.pre_get_column_spec(request, metadata) + pb_request = service.GetColumnSpecRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = column_spec.ColumnSpec() + pb_resp = column_spec.ColumnSpec.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_column_spec(resp) + return resp + + class _GetDataset(AutoMlRestStub): + def __hash__(self): + return hash("GetDataset") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.GetDatasetRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> dataset.Dataset: + r"""Call the get dataset method over HTTP. + + Args: + request (~.service.GetDatasetRequest): + The request object. Request message for + [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*}', + }, + ] + request, metadata = self._interceptor.pre_get_dataset(request, metadata) + pb_request = service.GetDatasetRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = dataset.Dataset() + pb_resp = dataset.Dataset.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_dataset(resp) + return resp + + class _GetModel(AutoMlRestStub): + def __hash__(self): + return hash("GetModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.GetModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> model.Model: + r"""Call the get model method over HTTP. + + Args: + request (~.service.GetModelRequest): + The request object. Request message for + [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model.Model: + API proto representing a trained + machine learning model. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}', + }, + ] + request, metadata = self._interceptor.pre_get_model(request, metadata) + pb_request = service.GetModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model.Model() + pb_resp = model.Model.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model(resp) + return resp + + class _GetModelEvaluation(AutoMlRestStub): + def __hash__(self): + return hash("GetModelEvaluation") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.GetModelEvaluationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> model_evaluation.ModelEvaluation: + r"""Call the get model evaluation method over HTTP. + + Args: + request (~.service.GetModelEvaluationRequest): + The request object. Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_evaluation.ModelEvaluation: + Evaluation results of a model. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}', + }, + ] + request, metadata = self._interceptor.pre_get_model_evaluation(request, metadata) + pb_request = service.GetModelEvaluationRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_evaluation.ModelEvaluation() + pb_resp = model_evaluation.ModelEvaluation.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model_evaluation(resp) + return resp + + class _GetTableSpec(AutoMlRestStub): + def __hash__(self): + return hash("GetTableSpec") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.GetTableSpecRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> table_spec.TableSpec: + r"""Call the get table spec method over HTTP. + + Args: + request (~.service.GetTableSpecRequest): + The request object. Request message for + [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table_spec.TableSpec: + A specification of a relational table. The table's + schema is represented via its child column specs. It is + pre-populated as part of ImportData by schema inference + algorithm, the version of which is a required parameter + of ImportData InputConfig. Note: While working with a + table, at times the schema may be inconsistent with the + data in the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a model. 
+ Used by: + + - Tables + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}', + }, + ] + request, metadata = self._interceptor.pre_get_table_spec(request, metadata) + pb_request = service.GetTableSpecRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table_spec.TableSpec() + pb_resp = table_spec.TableSpec.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_table_spec(resp) + return resp + + class _ImportData(AutoMlRestStub): + def __hash__(self): + return hash("ImportData") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ImportDataRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the import data method over HTTP. + + Args: + request (~.service.ImportDataRequest): + The request object. Request message for + [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
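+
+            For reference, a hedged sketch of invoking this RPC via the public
+            client, which wraps the raw ``operations_pb2.Operation`` returned
+            here in a future-like object (the dataset name and GCS URI are
+            placeholders):
+
+            .. code-block:: python
+
+                from google.cloud import automl_v1beta1
+
+                client = automl_v1beta1.AutoMlClient(transport="rest")
+                operation = client.import_data(
+                    name="projects/my-project/locations/us-central1/datasets/my-dataset",
+                    input_config=automl_v1beta1.InputConfig(
+                        gcs_source=automl_v1beta1.GcsSource(
+                            input_uris=["gs://my-bucket/train.csv"],
+                        ),
+                    ),
+                )
+                operation.result()  # blocks until the long-running import completes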
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*}:importData', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_import_data(request, metadata) + pb_request = service.ImportDataRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_import_data(resp) + return resp + + class _ListColumnSpecs(AutoMlRestStub): + def __hash__(self): + return hash("ListColumnSpecs") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ListColumnSpecsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> service.ListColumnSpecsResponse: + r"""Call the list column specs method over HTTP. + + Args: + request (~.service.ListColumnSpecsRequest): + The request object. Request message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListColumnSpecsResponse: + Response message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs', + }, + ] + request, metadata = self._interceptor.pre_list_column_specs(request, metadata) + pb_request = service.ListColumnSpecsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListColumnSpecsResponse() + pb_resp = service.ListColumnSpecsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_column_specs(resp) + return resp + + class _ListDatasets(AutoMlRestStub): + def __hash__(self): + return hash("ListDatasets") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ListDatasetsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> service.ListDatasetsResponse: + r"""Call the list datasets method over HTTP. + + Args: + request (~.service.ListDatasetsRequest): + The request object. Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListDatasetsResponse: + Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. 
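+
+            For reference, a minimal sketch of the equivalent call on the public
+            client, whose pager iterates across pages of this response
+            transparently (the parent path is a placeholder):
+
+            .. code-block:: python
+
+                from google.cloud import automl_v1beta1
+
+                client = automl_v1beta1.AutoMlClient(transport="rest")
+                page_result = client.list_datasets(
+                    parent="projects/my-project/locations/us-central1",
+                )
+                for dataset in page_result:
+                    print(dataset.name)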
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{parent=projects/*/locations/*}/datasets', + }, + ] + request, metadata = self._interceptor.pre_list_datasets(request, metadata) + pb_request = service.ListDatasetsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListDatasetsResponse() + pb_resp = service.ListDatasetsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_datasets(resp) + return resp + + class _ListModelEvaluations(AutoMlRestStub): + def __hash__(self): + return hash("ListModelEvaluations") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ListModelEvaluationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> service.ListModelEvaluationsResponse: + r"""Call the list model evaluations method over HTTP. + + Args: + request (~.service.ListModelEvaluationsRequest): + The request object. Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListModelEvaluationsResponse: + Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations', + }, + ] + request, metadata = self._interceptor.pre_list_model_evaluations(request, metadata) + pb_request = service.ListModelEvaluationsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListModelEvaluationsResponse() + pb_resp = service.ListModelEvaluationsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_model_evaluations(resp) + return resp + + class _ListModels(AutoMlRestStub): + def __hash__(self): + return hash("ListModels") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ListModelsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> service.ListModelsResponse: + r"""Call the list models method over HTTP. + + Args: + request (~.service.ListModelsRequest): + The request object. Request message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListModelsResponse: + Response message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{parent=projects/*/locations/*}/models', + }, + ] + request, metadata = self._interceptor.pre_list_models(request, metadata) + pb_request = service.ListModelsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListModelsResponse() + pb_resp = service.ListModelsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_models(resp) + return resp + + class _ListTableSpecs(AutoMlRestStub): + def __hash__(self): + return hash("ListTableSpecs") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.ListTableSpecsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> service.ListTableSpecsResponse: + r"""Call the list table specs method over HTTP. + + Args: + request (~.service.ListTableSpecsRequest): + The request object. Request message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.service.ListTableSpecsResponse: + Response message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs', + }, + ] + request, metadata = self._interceptor.pre_list_table_specs(request, metadata) + pb_request = service.ListTableSpecsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = service.ListTableSpecsResponse() + pb_resp = service.ListTableSpecsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_table_specs(resp) + return resp + + class _UndeployModel(AutoMlRestStub): + def __hash__(self): + return hash("UndeployModel") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.UndeployModelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the undeploy model method over HTTP. + + Args: + request (~.service.UndeployModelRequest): + The request object. Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
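+
+            For reference, a minimal sketch of the same call through the public
+            client, which returns a future wrapping this Operation (the model
+            name is a placeholder):
+
+            .. code-block:: python
+
+                from google.cloud import automl_v1beta1
+
+                client = automl_v1beta1.AutoMlClient(transport="rest")
+                operation = client.undeploy_model(
+                    name="projects/my-project/locations/us-central1/models/my-model",
+                )
+                operation.result()  # waits for the undeploy to finish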
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:undeploy', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_undeploy_model(request, metadata) + pb_request = service.UndeployModelRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_undeploy_model(resp) + return resp + + class _UpdateColumnSpec(AutoMlRestStub): + def __hash__(self): + return hash("UpdateColumnSpec") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.UpdateColumnSpecRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> gca_column_spec.ColumnSpec: + r"""Call the update column spec method over HTTP. + + Args: + request (~.service.UpdateColumnSpecRequest): + The request object. Request message for + [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_column_spec.ColumnSpec: + A representation of a column in a relational table. When + listing them, column specs are returned in the same + order in which they were given on import . 
Used by: + + - Tables + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}', + 'body': 'column_spec', + }, + ] + request, metadata = self._interceptor.pre_update_column_spec(request, metadata) + pb_request = service.UpdateColumnSpecRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_column_spec.ColumnSpec() + pb_resp = gca_column_spec.ColumnSpec.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_column_spec(resp) + return resp + + class _UpdateDataset(AutoMlRestStub): + def __hash__(self): + return hash("UpdateDataset") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.UpdateDatasetRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> gca_dataset.Dataset: + r"""Call the update dataset method over HTTP. + + Args: + request (~.service.UpdateDatasetRequest): + The request object. Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_dataset.Dataset: + A workspace for solving a single, + particular machine learning (ML) + problem. A workspace contains examples + that may be annotated. 
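+
+            For reference, a hedged sketch of the equivalent update through the
+            public client (the dataset name and new display name are
+            placeholders; the flattened ``dataset`` argument is assumed):
+
+            .. code-block:: python
+
+                from google.cloud import automl_v1beta1
+
+                client = automl_v1beta1.AutoMlClient(transport="rest")
+                updated = client.update_dataset(
+                    dataset=automl_v1beta1.Dataset(
+                        name="projects/my-project/locations/us-central1/datasets/my-dataset",
+                        display_name="renamed_dataset",
+                    ),
+                )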
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}', + 'body': 'dataset', + }, + ] + request, metadata = self._interceptor.pre_update_dataset(request, metadata) + pb_request = service.UpdateDatasetRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_dataset.Dataset() + pb_resp = gca_dataset.Dataset.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_dataset(resp) + return resp + + class _UpdateTableSpec(AutoMlRestStub): + def __hash__(self): + return hash("UpdateTableSpec") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: service.UpdateTableSpecRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> gca_table_spec.TableSpec: + r"""Call the update table spec method over HTTP. + + Args: + request (~.service.UpdateTableSpecRequest): + The request object. Request message for + [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_table_spec.TableSpec: + A specification of a relational table. The table's + schema is represented via its child column specs. It is + pre-populated as part of ImportData by schema inference + algorithm, the version of which is a required parameter + of ImportData InputConfig. Note: While working with a + table, at times the schema may be inconsistent with the + data in the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a model. 
+ Used by: + + - Tables + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}', + 'body': 'table_spec', + }, + ] + request, metadata = self._interceptor.pre_update_table_spec(request, metadata) + pb_request = service.UpdateTableSpecRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_table_spec.TableSpec() + pb_resp = gca_table_spec.TableSpec.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_table_spec(resp) + return resp + + @property + def create_dataset(self) -> Callable[ + [service.CreateDatasetRequest], + gca_dataset.Dataset]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateDataset(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_model(self) -> Callable[ + [service.CreateModelRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_dataset(self) -> Callable[ + [service.DeleteDatasetRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteDataset(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_model(self) -> Callable[ + [service.DeleteModelRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def deploy_model(self) -> Callable[ + [service.DeployModelRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._DeployModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def export_data(self) -> Callable[ + [service.ExportDataRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ExportData(self._session, self._host, self._interceptor) # type: ignore + + @property + def export_evaluated_examples(self) -> Callable[ + [service.ExportEvaluatedExamplesRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ExportEvaluatedExamples(self._session, self._host, self._interceptor) # type: ignore + + @property + def export_model(self) -> Callable[ + [service.ExportModelRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ExportModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_annotation_spec(self) -> Callable[ + [service.GetAnnotationSpecRequest], + annotation_spec.AnnotationSpec]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetAnnotationSpec(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_column_spec(self) -> Callable[ + [service.GetColumnSpecRequest], + column_spec.ColumnSpec]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetColumnSpec(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_dataset(self) -> Callable[ + [service.GetDatasetRequest], + dataset.Dataset]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetDataset(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_model(self) -> Callable[ + [service.GetModelRequest], + model.Model]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_model_evaluation(self) -> Callable[ + [service.GetModelEvaluationRequest], + model_evaluation.ModelEvaluation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetModelEvaluation(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_table_spec(self) -> Callable[ + [service.GetTableSpecRequest], + table_spec.TableSpec]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetTableSpec(self._session, self._host, self._interceptor) # type: ignore + + @property + def import_data(self) -> Callable[ + [service.ImportDataRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ImportData(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_column_specs(self) -> Callable[ + [service.ListColumnSpecsRequest], + service.ListColumnSpecsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListColumnSpecs(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_datasets(self) -> Callable[ + [service.ListDatasetsRequest], + service.ListDatasetsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListDatasets(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_model_evaluations(self) -> Callable[ + [service.ListModelEvaluationsRequest], + service.ListModelEvaluationsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListModelEvaluations(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_models(self) -> Callable[ + [service.ListModelsRequest], + service.ListModelsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListModels(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_table_specs(self) -> Callable[ + [service.ListTableSpecsRequest], + service.ListTableSpecsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListTableSpecs(self._session, self._host, self._interceptor) # type: ignore + + @property + def undeploy_model(self) -> Callable[ + [service.UndeployModelRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UndeployModel(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_column_spec(self) -> Callable[ + [service.UpdateColumnSpecRequest], + gca_column_spec.ColumnSpec]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateColumnSpec(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_dataset(self) -> Callable[ + [service.UpdateDatasetRequest], + gca_dataset.Dataset]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateDataset(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_table_spec(self) -> Callable[ + [service.UpdateTableSpecRequest], + gca_table_spec.TableSpec]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateTableSpec(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__=( + 'AutoMlRestTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/__init__.py new file mode 100644 index 00000000..905b8c43 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import PredictionServiceClient +from .async_client import PredictionServiceAsyncClient + +__all__ = ( + 'PredictionServiceClient', + 'PredictionServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/async_client.py new file mode 100644 index 00000000..6144f9b3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/async_client.py @@ -0,0 +1,621 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union + +from google.cloud.automl_v1beta1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.automl_v1beta1.types import annotation_payload +from google.cloud.automl_v1beta1.types import data_items +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import prediction_service +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .client import PredictionServiceClient + + +class PredictionServiceAsyncClient: + """AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + """ + + _client: PredictionServiceClient + + DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT + + model_path = staticmethod(PredictionServiceClient.model_path) + parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) + common_billing_account_path = staticmethod(PredictionServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) + common_project_path = staticmethod(PredictionServiceClient.common_project_path) + parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) + common_location_path = staticmethod(PredictionServiceClient.common_location_path) + parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. 
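+
+        For reference, a minimal usage sketch (the key file path is a
+        placeholder):
+
+        .. code-block:: python
+
+            from google.cloud import automl_v1beta1
+
+            client = automl_v1beta1.PredictionServiceAsyncClient.from_service_account_file(
+                "service-account.json",
+            )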
+ + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return PredictionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> PredictionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PredictionServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PredictionServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def predict(self, + request: Optional[Union[prediction_service.PredictRequest, dict]] = None, + *, + name: Optional[str] = None, + payload: Optional[data_items.ExamplePayload] = None, + params: Optional[MutableMapping[str, str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. The prediction result will be + directly returned in the response. Available for following ML + problems, and their expected request payloads: + + - Image Classification - Image in .JPEG, .GIF or .PNG format, + image_bytes up to 30MB. + - Image Object Detection - Image in .JPEG, .GIF or .PNG format, + image_bytes up to 30MB. + - Text Classification - TextSnippet, content up to 60,000 + characters, UTF-8 encoded. + - Text Extraction - TextSnippet, content up to 30,000 + characters, UTF-8 NFC encoded. + - Translation - TextSnippet, content up to 25,000 characters, + UTF-8 encoded. + - Tables - Row, with column values matching the columns of the + model, up to 5MB. Not available for FORECASTING + + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]. + + - Text Sentiment - TextSnippet, content up 500 characters, + UTF-8 encoded. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_predict(): + # Create a client + client = automl_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + payload = automl_v1beta1.ExamplePayload() + payload.image.image_bytes = b'image_bytes_blob' + + request = automl_v1beta1.PredictRequest( + name="name_value", + payload=payload, + ) + + # Make the request + response = await client.predict(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.PredictRequest, dict]]): + The request object. Request message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. 
+ name (:class:`str`): + Required. Name of the model requested + to serve the prediction. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + payload (:class:`google.cloud.automl_v1beta1.types.ExamplePayload`): + Required. Payload to perform a + prediction on. The payload must match + the problem type that the model was + trained to solve. + + This corresponds to the ``payload`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (:class:`MutableMapping[str, str]`): + Additional domain-specific parameters, any string must + be up to 25000 characters long. + + - For Image Classification: + + ``score_threshold`` - (float) A value from 0.0 to + 1.0. When the model makes predictions for an image, + it will only produce results that have at least this + confidence score. The default is 0.5. + + - For Image Object Detection: ``score_threshold`` - + (float) When Model detects objects on the image, it + will only produce bounding boxes which have at least + this confidence score. Value in 0 to 1 range, default + is 0.5. ``max_bounding_box_count`` - (int64) No more + than this number of bounding boxes will be returned + in the response. Default is 100, the requested value + may be limited by server. + + - For Tables: feature_importance - (boolean) Whether + feature importance should be populated in the + returned TablesAnnotation. The default is false. + + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, payload, params]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.PredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if payload is not None: + request.payload = payload + + if params: + request.params.update(params) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.predict, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def batch_predict(self, + request: Optional[Union[prediction_service.BatchPredictRequest, dict]] = None, + *, + name: Optional[str] = None, + input_config: Optional[io.BatchPredictInputConfig] = None, + output_config: Optional[io.BatchPredictOutputConfig] = None, + params: Optional[MutableMapping[str, str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. + Available for following ML problems: + + - Image Classification + - Image Object Detection + - Video Classification + - Video Object Tracking \* Text Extraction + - Tables + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + async def sample_batch_predict(): + # Create a client + client = automl_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.BatchPredictRequest( + name="name_value", + ) + + # Make the request + operation = client.batch_predict(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.automl_v1beta1.types.BatchPredictRequest, dict]]): + The request object. Request message for + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + name (:class:`str`): + Required. Name of the model requested + to serve the batch prediction. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (:class:`google.cloud.automl_v1beta1.types.BatchPredictInputConfig`): + Required. The input configuration for + batch prediction. + + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`google.cloud.automl_v1beta1.types.BatchPredictOutputConfig`): + Required. The Configuration + specifying where output predictions + should be written. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (:class:`MutableMapping[str, str]`): + Required. Additional domain-specific parameters for the + predictions, any string must be up to 25000 characters + long. + + - For Text Classification: + + ``score_threshold`` - (float) A value from 0.0 to + 1.0. 
When the model makes predictions for a text + snippet, it will only produce results that have at + least this confidence score. The default is 0.5. + + - For Image Classification: + + ``score_threshold`` - (float) A value from 0.0 to + 1.0. When the model makes predictions for an image, + it will only produce results that have at least this + confidence score. The default is 0.5. + + - For Image Object Detection: + + ``score_threshold`` - (float) When Model detects + objects on the image, it will only produce bounding + boxes which have at least this confidence score. + Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than + this number of bounding boxes will be produced per + image. Default is 100, the requested value may be + limited by server. + + - For Video Classification : + + ``score_threshold`` - (float) A value from 0.0 to + 1.0. When the model makes predictions for a video, it + will only produce results that have at least this + confidence score. The default is 0.5. + ``segment_classification`` - (boolean) Set to true to + request segment-level classification. AutoML Video + Intelligence returns labels and their confidence + scores for the entire segment of the video that user + specified in the request configuration. The default + is "true". ``shot_classification`` - (boolean) Set to + true to request shot-level classification. AutoML + Video Intelligence determines the boundaries for each + camera shot in the entire segment of the video that + user specified in the request configuration. AutoML + Video Intelligence then returns labels and their + confidence scores for each detected shot, along with + the start and end time of the shot. WARNING: Model + evaluation is not done for this classification type, + the quality of it depends on training data, but there + are no metrics provided to describe that quality. The + default is "false". ``1s_interval_classification`` - + (boolean) Set to true to request classification for a + video at one-second intervals. AutoML Video + Intelligence returns labels and their confidence + scores for each second of the entire segment of the + video that user specified in the request + configuration. WARNING: Model evaluation is not done + for this classification type, the quality of it + depends on training data, but there are no metrics + provided to describe that quality. The default is + "false". + + - For Tables: + + feature_importance - (boolean) Whether feature + importance should be populated in the returned + TablesAnnotations. The default is false. + + - For Video Object Tracking: + + ``score_threshold`` - (float) When Model detects + objects on video frames, it will only produce + bounding boxes which have at least this confidence + score. Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than + this number of bounding boxes will be returned per + frame. Default is 100, the requested value may be + limited by server. ``min_bounding_box_size`` - + (float) Only bounding boxes with shortest edge at + least that long as a relative value of video frame + size will be returned. Value in 0 to 1 range. Default + is 0. + + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.automl_v1beta1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of + the operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config, output_config, params]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.BatchPredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + if output_config is not None: + request.output_config = output_config + + if params: + request.params.update(params) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_predict, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + prediction_service.BatchPredictResult, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "PredictionServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "PredictionServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/client.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/client.py new file mode 100644 index 00000000..7894085e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/client.py @@ -0,0 +1,823 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+import os
+import re
+from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast
+
+from google.cloud.automl_v1beta1 import gapic_version as package_version
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+try:
+    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError:  # pragma: NO COVER
+    OptionalRetry = Union[retries.Retry, object]  # type: ignore
+
+from google.api_core import operation  # type: ignore
+from google.api_core import operation_async  # type: ignore
+from google.cloud.automl_v1beta1.types import annotation_payload
+from google.cloud.automl_v1beta1.types import data_items
+from google.cloud.automl_v1beta1.types import io
+from google.cloud.automl_v1beta1.types import operations
+from google.cloud.automl_v1beta1.types import prediction_service
+from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import PredictionServiceGrpcTransport
+from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
+from .transports.rest import PredictionServiceRestTransport
+
+
+class PredictionServiceClientMeta(type):
+    """Metaclass for the PredictionService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[PredictionServiceTransport]]
+    _transport_registry["grpc"] = PredictionServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport
+    _transport_registry["rest"] = PredictionServiceRestTransport
+
+    def get_transport_class(cls,
+            label: Optional[str] = None,
+        ) -> Type[PredictionServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
+    """AutoML Prediction API.
+
+    On any input that is documented to expect a string parameter in
+    snake_case or kebab-case, either of those cases is accepted.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "automl.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PredictionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PredictionServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+        if use_client_cert not in ("true", "false"):
+            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
+        if use_mtls_endpoint not in ("auto", "never", "always"):
+            raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`")
+
+        # Figure out the client cert source to use.
+        client_cert_source = None
+        if use_client_cert == "true":
+            if client_options.client_cert_source:
+                client_cert_source = client_options.client_cert_source
+            elif mtls.has_default_client_cert_source():
+                client_cert_source = mtls.default_client_cert_source()
+
+        # Figure out which api endpoint to use.
+        if client_options.api_endpoint is not None:
+            api_endpoint = client_options.api_endpoint
+        elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source):
+            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+        else:
+            api_endpoint = cls.DEFAULT_ENDPOINT
+
+        return api_endpoint, client_cert_source
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Optional[Union[str, PredictionServiceTransport]] = None,
+            client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the prediction service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, PredictionServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PredictionServiceTransport): + # transport is a PredictionServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def predict(self, + request: Optional[Union[prediction_service.PredictRequest, dict]] = None, + *, + name: Optional[str] = None, + payload: Optional[data_items.ExamplePayload] = None, + params: Optional[MutableMapping[str, str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. The prediction result will be + directly returned in the response. Available for following ML + problems, and their expected request payloads: + + - Image Classification - Image in .JPEG, .GIF or .PNG format, + image_bytes up to 30MB. + - Image Object Detection - Image in .JPEG, .GIF or .PNG format, + image_bytes up to 30MB. + - Text Classification - TextSnippet, content up to 60,000 + characters, UTF-8 encoded. + - Text Extraction - TextSnippet, content up to 30,000 + characters, UTF-8 NFC encoded. 
+ - Translation - TextSnippet, content up to 25,000 characters, + UTF-8 encoded. + - Tables - Row, with column values matching the columns of the + model, up to 5MB. Not available for FORECASTING + + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]. + + - Text Sentiment - TextSnippet, content up 500 characters, + UTF-8 encoded. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_predict(): + # Create a client + client = automl_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + payload = automl_v1beta1.ExamplePayload() + payload.image.image_bytes = b'image_bytes_blob' + + request = automl_v1beta1.PredictRequest( + name="name_value", + payload=payload, + ) + + # Make the request + response = client.predict(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.PredictRequest, dict]): + The request object. Request message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. + name (str): + Required. Name of the model requested + to serve the prediction. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + payload (google.cloud.automl_v1beta1.types.ExamplePayload): + Required. Payload to perform a + prediction on. The payload must match + the problem type that the model was + trained to solve. + + This corresponds to the ``payload`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (MutableMapping[str, str]): + Additional domain-specific parameters, any string must + be up to 25000 characters long. + + - For Image Classification: + + ``score_threshold`` - (float) A value from 0.0 to + 1.0. When the model makes predictions for an image, + it will only produce results that have at least this + confidence score. The default is 0.5. + + - For Image Object Detection: ``score_threshold`` - + (float) When Model detects objects on the image, it + will only produce bounding boxes which have at least + this confidence score. Value in 0 to 1 range, default + is 0.5. ``max_bounding_box_count`` - (int64) No more + than this number of bounding boxes will be returned + in the response. Default is 100, the requested value + may be limited by server. + + - For Tables: feature_importance - (boolean) Whether + feature importance should be populated in the + returned TablesAnnotation. The default is false. + + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.automl_v1beta1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, payload, params]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.PredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.PredictRequest): + request = prediction_service.PredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if payload is not None: + request.payload = payload + if params is not None: + request.params = params + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def batch_predict(self, + request: Optional[Union[prediction_service.BatchPredictRequest, dict]] = None, + *, + name: Optional[str] = None, + input_config: Optional[io.BatchPredictInputConfig] = None, + output_config: Optional[io.BatchPredictOutputConfig] = None, + params: Optional[MutableMapping[str, str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. + Available for following ML problems: + + - Image Classification + - Image Object Detection + - Video Classification + - Video Object Tracking \* Text Extraction + - Tables + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import automl_v1beta1 + + def sample_batch_predict(): + # Create a client + client = automl_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + request = automl_v1beta1.BatchPredictRequest( + name="name_value", + ) + + # Make the request + operation = client.batch_predict(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.automl_v1beta1.types.BatchPredictRequest, dict]): + The request object. Request message for + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + name (str): + Required. Name of the model requested + to serve the batch prediction. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + input_config (google.cloud.automl_v1beta1.types.BatchPredictInputConfig): + Required. The input configuration for + batch prediction. + + This corresponds to the ``input_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.automl_v1beta1.types.BatchPredictOutputConfig): + Required. The Configuration + specifying where output predictions + should be written. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + params (MutableMapping[str, str]): + Required. Additional domain-specific parameters for the + predictions, any string must be up to 25000 characters + long. + + - For Text Classification: + + ``score_threshold`` - (float) A value from 0.0 to + 1.0. When the model makes predictions for a text + snippet, it will only produce results that have at + least this confidence score. The default is 0.5. + + - For Image Classification: + + ``score_threshold`` - (float) A value from 0.0 to + 1.0. When the model makes predictions for an image, + it will only produce results that have at least this + confidence score. The default is 0.5. + + - For Image Object Detection: + + ``score_threshold`` - (float) When Model detects + objects on the image, it will only produce bounding + boxes which have at least this confidence score. + Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than + this number of bounding boxes will be produced per + image. Default is 100, the requested value may be + limited by server. + + - For Video Classification : + + ``score_threshold`` - (float) A value from 0.0 to + 1.0. When the model makes predictions for a video, it + will only produce results that have at least this + confidence score. The default is 0.5. + ``segment_classification`` - (boolean) Set to true to + request segment-level classification. AutoML Video + Intelligence returns labels and their confidence + scores for the entire segment of the video that user + specified in the request configuration. The default + is "true". ``shot_classification`` - (boolean) Set to + true to request shot-level classification. AutoML + Video Intelligence determines the boundaries for each + camera shot in the entire segment of the video that + user specified in the request configuration. 
AutoML + Video Intelligence then returns labels and their + confidence scores for each detected shot, along with + the start and end time of the shot. WARNING: Model + evaluation is not done for this classification type, + the quality of it depends on training data, but there + are no metrics provided to describe that quality. The + default is "false". ``1s_interval_classification`` - + (boolean) Set to true to request classification for a + video at one-second intervals. AutoML Video + Intelligence returns labels and their confidence + scores for each second of the entire segment of the + video that user specified in the request + configuration. WARNING: Model evaluation is not done + for this classification type, the quality of it + depends on training data, but there are no metrics + provided to describe that quality. The default is + "false". + + - For Tables: + + feature_importance - (boolean) Whether feature + importance should be populated in the returned + TablesAnnotations. The default is false. + + - For Video Object Tracking: + + ``score_threshold`` - (float) When Model detects + objects on video frames, it will only produce + bounding boxes which have at least this confidence + score. Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than + this number of bounding boxes will be returned per + frame. Default is 100, the requested value may be + limited by server. ``min_bounding_box_size`` - + (float) Only bounding boxes with shortest edge at + least that long as a relative value of video frame + size will be returned. Value in 0 to 1 range. Default + is 0. + + This corresponds to the ``params`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.automl_v1beta1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of + the operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, input_config, output_config, params]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.BatchPredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.BatchPredictRequest): + request = prediction_service.BatchPredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + if input_config is not None: + request.input_config = input_config + if output_config is not None: + request.output_config = output_config + if params is not None: + request.params = params + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + prediction_service.BatchPredictResult, + metadata_type=operations.OperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "PredictionServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + + + + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "PredictionServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py new file mode 100644 index 00000000..d8c81688 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PredictionServiceTransport +from .grpc import PredictionServiceGrpcTransport +from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .rest import PredictionServiceRestTransport +from .rest import PredictionServiceRestInterceptor + + +# Compile a registry of transports. 
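+# The registry keys ("grpc", "grpc_asyncio" and "rest") are the values accepted by the
+# client's ``transport`` argument; each maps to the concrete transport class used for RPCs.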
+_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] +_transport_registry['grpc'] = PredictionServiceGrpcTransport +_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport +_transport_registry['rest'] = PredictionServiceRestTransport + +__all__ = ( + 'PredictionServiceTransport', + 'PredictionServiceGrpcTransport', + 'PredictionServiceGrpcAsyncIOTransport', + 'PredictionServiceRestTransport', + 'PredictionServiceRestInterceptor', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py new file mode 100644 index 00000000..b166dafa --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.automl_v1beta1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.automl_v1beta1.types import prediction_service +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +class PredictionServiceTransport(abc.ABC): + """Abstract transport class for PredictionService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'automl.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience(api_audience if api_audience else host) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.predict: gapic_v1.method.wrap_method( + self.predict, + default_timeout=60.0, + client_info=client_info, + ), + self.batch_predict: gapic_v1.method.wrap_method( + self.batch_predict, + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + Union[ + prediction_service.PredictResponse, + Awaitable[prediction_service.PredictResponse] + ]]: + raise NotImplementedError() + + @property + def batch_predict(self) -> Callable[ + [prediction_service.BatchPredictRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'PredictionServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py new file mode 100644 index 00000000..79482505 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py @@ -0,0 +1,348 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.automl_v1beta1.types import prediction_service +from google.longrunning import operations_pb2 # type: ignore +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO + + +class PredictionServiceGrpcTransport(PredictionServiceTransport): + """gRPC backend transport for PredictionService. + + AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. 
+ + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + prediction_service.PredictResponse]: + r"""Return a callable for the predict method over gRPC. + + Perform an online prediction. The prediction result will be + directly returned in the response. Available for following ML + problems, and their expected request payloads: + + - Image Classification - Image in .JPEG, .GIF or .PNG format, + image_bytes up to 30MB. + - Image Object Detection - Image in .JPEG, .GIF or .PNG format, + image_bytes up to 30MB. + - Text Classification - TextSnippet, content up to 60,000 + characters, UTF-8 encoded. + - Text Extraction - TextSnippet, content up to 30,000 + characters, UTF-8 NFC encoded. + - Translation - TextSnippet, content up to 25,000 characters, + UTF-8 encoded. + - Tables - Row, with column values matching the columns of the + model, up to 5MB. Not available for FORECASTING + + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]. + + - Text Sentiment - TextSnippet, content up 500 characters, + UTF-8 encoded. + + Returns: + Callable[[~.PredictRequest], + ~.PredictResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'predict' not in self._stubs: + self._stubs['predict'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.PredictionService/Predict', + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs['predict'] + + @property + def batch_predict(self) -> Callable[ + [prediction_service.BatchPredictRequest], + operations_pb2.Operation]: + r"""Return a callable for the batch predict method over gRPC. + + Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. 
+ Available for following ML problems: + + - Image Classification + - Image Object Detection + - Video Classification + - Video Object Tracking \* Text Extraction + - Tables + + Returns: + Callable[[~.BatchPredictRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_predict' not in self._stubs: + self._stubs['batch_predict'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.PredictionService/BatchPredict', + request_serializer=prediction_service.BatchPredictRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_predict'] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'PredictionServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..ba37665a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -0,0 +1,347 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.automl_v1beta1.types import prediction_service +from google.longrunning import operations_pb2 # type: ignore +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import PredictionServiceGrpcTransport + + +class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): + """gRPC AsyncIO backend transport for PredictionService. + + AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
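+
+    Illustrative usage sketch (the resource name is a placeholder; the async
+    client shown below typically selects this transport by default):
+
+    .. code-block:: python
+
+        from google.cloud import automl_v1beta1
+
+        async def predict_text(model_name: str) -> automl_v1beta1.PredictResponse:
+            # The async client uses the gRPC AsyncIO transport by default.
+            client = automl_v1beta1.PredictionServiceAsyncClient()
+            payload = automl_v1beta1.ExamplePayload(
+                text_snippet=automl_v1beta1.TextSnippet(content="Hello, world")
+            )
+            return await client.predict(name=model_name, payload=payload)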
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'automl.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. 
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + Awaitable[prediction_service.PredictResponse]]: + r"""Return a callable for the predict method over gRPC. + + Perform an online prediction. The prediction result will be + directly returned in the response. Available for following ML + problems, and their expected request payloads: + + - Image Classification - Image in .JPEG, .GIF or .PNG format, + image_bytes up to 30MB. + - Image Object Detection - Image in .JPEG, .GIF or .PNG format, + image_bytes up to 30MB. + - Text Classification - TextSnippet, content up to 60,000 + characters, UTF-8 encoded. + - Text Extraction - TextSnippet, content up to 30,000 + characters, UTF-8 NFC encoded. + - Translation - TextSnippet, content up to 25,000 characters, + UTF-8 encoded. + - Tables - Row, with column values matching the columns of the + model, up to 5MB. Not available for FORECASTING + + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]. + + - Text Sentiment - TextSnippet, content up 500 characters, + UTF-8 encoded. + + Returns: + Callable[[~.PredictRequest], + Awaitable[~.PredictResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'predict' not in self._stubs: + self._stubs['predict'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.PredictionService/Predict', + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs['predict'] + + @property + def batch_predict(self) -> Callable[ + [prediction_service.BatchPredictRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch predict method over gRPC. + + Perform a batch prediction. Unlike the online + [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], + batch prediction result won't be immediately available in the + response. Instead, a long running operation object is returned. + User can poll the operation result via + [GetOperation][google.longrunning.Operations.GetOperation] + method. Once the operation is done, + [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] + is returned in the + [response][google.longrunning.Operation.response] field. 
+ Available for following ML problems: + + - Image Classification + - Image Object Detection + - Video Classification + - Video Object Tracking \* Text Extraction + - Tables + + Returns: + Callable[[~.BatchPredictRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_predict' not in self._stubs: + self._stubs['batch_predict'] = self.grpc_channel.unary_unary( + '/google.cloud.automl.v1beta1.PredictionService/BatchPredict', + request_serializer=prediction_service.BatchPredictRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_predict'] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ( + 'PredictionServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py new file mode 100644 index 00000000..3bd06e82 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py @@ -0,0 +1,484 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.cloud.automl_v1beta1.types import prediction_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class PredictionServiceRestInterceptor: + """Interceptor for PredictionService. 
+ + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the PredictionServiceRestTransport. + + .. code-block:: python + class MyCustomPredictionServiceInterceptor(PredictionServiceRestInterceptor): + def pre_batch_predict(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_batch_predict(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_predict(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_predict(self, response): + logging.log(f"Received response: {response}") + return response + + transport = PredictionServiceRestTransport(interceptor=MyCustomPredictionServiceInterceptor()) + client = PredictionServiceClient(transport=transport) + + + """ + def pre_batch_predict(self, request: prediction_service.BatchPredictRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[prediction_service.BatchPredictRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for batch_predict + + Override in a subclass to manipulate the request or metadata + before they are sent to the PredictionService server. + """ + return request, metadata + + def post_batch_predict(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for batch_predict + + Override in a subclass to manipulate the response + after it is returned by the PredictionService server but before + it is returned to user code. + """ + return response + def pre_predict(self, request: prediction_service.PredictRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[prediction_service.PredictRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for predict + + Override in a subclass to manipulate the request or metadata + before they are sent to the PredictionService server. + """ + return request, metadata + + def post_predict(self, response: prediction_service.PredictResponse) -> prediction_service.PredictResponse: + """Post-rpc interceptor for predict + + Override in a subclass to manipulate the response + after it is returned by the PredictionService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class PredictionServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: PredictionServiceRestInterceptor + + +class PredictionServiceRestTransport(PredictionServiceTransport): + """REST backend transport for PredictionService. + + AutoML Prediction API. + + On any input that is documented to expect a string parameter in + snake_case or kebab-case, either of those cases is accepted. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+
+    It sends JSON representations of protocol buffers over HTTP/1.1
+
+    """
+
+    def __init__(self, *,
+            host: str = 'automl.googleapis.com',
+            credentials: Optional[ga_credentials.Credentials] = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            client_cert_source_for_mtls: Optional[Callable[[
+                ], Tuple[bytes, bytes]]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            url_scheme: str = 'https',
+            interceptor: Optional[PredictionServiceRestInterceptor] = None,
+            api_audience: Optional[str] = None,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
+                certificate to configure mutual TLS HTTP channel. It is ignored
+                if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you are developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                "https", but for testing or local servers,
+                "http" can be specified.
+        """
+        # Run the base constructor
+        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(f"Unexpected hostname structure: {host}")  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST)
+        self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or PredictionServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def operations_client(self) -> operations_v1.AbstractOperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Only create a new client if we do not already have one.
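+        # The http_options defined below map the standard
+        # google.longrunning.Operations RPCs (get, list, cancel, delete, wait)
+        # onto their v1beta1 REST paths; the resulting client is cached on the
+        # instance and reused on later calls.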
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + 'google.longrunning.Operations.CancelOperation': [ + { + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}:cancel', + 'body': '*', + }, + ], + 'google.longrunning.Operations.DeleteOperation': [ + { + 'method': 'delete', + 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}', + }, + ], + 'google.longrunning.Operations.GetOperation': [ + { + 'method': 'get', + 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}', + }, + ], + 'google.longrunning.Operations.ListOperations': [ + { + 'method': 'get', + 'uri': '/v1beta1/{name=projects/*/locations/*}/operations', + }, + ], + 'google.longrunning.Operations.WaitOperation': [ + { + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}:wait', + 'body': '*', + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1beta1") + + self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) + + # Return the client from cache. + return self._operations_client + + class _BatchPredict(PredictionServiceRestStub): + def __hash__(self): + return hash("BatchPredict") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: prediction_service.BatchPredictRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the batch predict method over HTTP. + + Args: + request (~.prediction_service.BatchPredictRequest): + The request object. Request message for + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
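+
+                At the client surface this is wrapped as a
+                :class:`google.api_core.operation.Operation` that can be polled
+                for the
+                [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult].
+                A minimal sketch (resource names and GCS URIs are placeholders):
+
+                .. code-block:: python
+
+                    from google.cloud import automl_v1beta1
+
+                    client = automl_v1beta1.PredictionServiceClient(transport="rest")
+                    operation = client.batch_predict(
+                        name="projects/<project>/locations/<location>/models/<model>",
+                        input_config=automl_v1beta1.BatchPredictInputConfig(
+                            gcs_source=automl_v1beta1.GcsSource(
+                                input_uris=["gs://<bucket>/input.csv"],
+                            ),
+                        ),
+                        output_config=automl_v1beta1.BatchPredictOutputConfig(
+                            gcs_destination=automl_v1beta1.GcsDestination(
+                                output_uri_prefix="gs://<bucket>/output/",
+                            ),
+                        ),
+                    )
+                    # Blocks until the long-running operation completes.
+                    result = operation.result(timeout=3600)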
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:batchPredict', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_batch_predict(request, metadata) + pb_request = prediction_service.BatchPredictRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_batch_predict(resp) + return resp + + class _Predict(PredictionServiceRestStub): + def __hash__(self): + return hash("Predict") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: prediction_service.PredictRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> prediction_service.PredictResponse: + r"""Call the predict method over HTTP. + + Args: + request (~.prediction_service.PredictRequest): + The request object. Request message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.prediction_service.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. 
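+
+                A minimal request sketch over this REST transport (the file
+                path and model name are placeholders):
+
+                .. code-block:: python
+
+                    from google.cloud import automl_v1beta1
+
+                    client = automl_v1beta1.PredictionServiceClient(transport="rest")
+                    with open("image.jpg", "rb") as f:
+                        # image_bytes may be up to 30MB, per the limits above.
+                        payload = automl_v1beta1.ExamplePayload(
+                            image=automl_v1beta1.Image(image_bytes=f.read()),
+                        )
+                    response = client.predict(
+                        name="projects/<project>/locations/<location>/models/<model>",
+                        payload=payload,
+                    )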
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:predict', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_predict(request, metadata) + pb_request = prediction_service.PredictRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = prediction_service.PredictResponse() + pb_resp = prediction_service.PredictResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_predict(resp) + return resp + + @property + def batch_predict(self) -> Callable[ + [prediction_service.BatchPredictRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._BatchPredict(self._session, self._host, self._interceptor) # type: ignore + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + prediction_service.PredictResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._Predict(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__=( + 'PredictionServiceRestTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/__init__.py new file mode 100644 index 00000000..c5985ccf --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/__init__.py @@ -0,0 +1,318 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .annotation_payload import ( + AnnotationPayload, +) +from .annotation_spec import ( + AnnotationSpec, +) +from .classification import ( + ClassificationAnnotation, + ClassificationEvaluationMetrics, + VideoClassificationAnnotation, + ClassificationType, +) +from .column_spec import ( + ColumnSpec, +) +from .data_items import ( + Document, + DocumentDimensions, + ExamplePayload, + Image, + Row, + TextSnippet, +) +from .data_stats import ( + ArrayStats, + CategoryStats, + CorrelationStats, + DataStats, + Float64Stats, + StringStats, + StructStats, + TimestampStats, +) +from .data_types import ( + DataType, + StructType, + TypeCode, +) +from .dataset import ( + Dataset, +) +from .detection import ( + BoundingBoxMetricsEntry, + ImageObjectDetectionAnnotation, + ImageObjectDetectionEvaluationMetrics, + VideoObjectTrackingAnnotation, + VideoObjectTrackingEvaluationMetrics, +) +from .geometry import ( + BoundingPoly, + NormalizedVertex, +) +from .image import ( + ImageClassificationDatasetMetadata, + ImageClassificationModelDeploymentMetadata, + ImageClassificationModelMetadata, + ImageObjectDetectionDatasetMetadata, + ImageObjectDetectionModelDeploymentMetadata, + ImageObjectDetectionModelMetadata, +) +from .io import ( + BatchPredictInputConfig, + BatchPredictOutputConfig, + BigQueryDestination, + BigQuerySource, + DocumentInputConfig, + ExportEvaluatedExamplesOutputConfig, + GcrDestination, + GcsDestination, + GcsSource, + InputConfig, + ModelExportOutputConfig, + OutputConfig, +) +from .model import ( + Model, +) +from .model_evaluation import ( + ModelEvaluation, +) +from .operations import ( + BatchPredictOperationMetadata, + CreateModelOperationMetadata, + DeleteOperationMetadata, + DeployModelOperationMetadata, + ExportDataOperationMetadata, + ExportEvaluatedExamplesOperationMetadata, + ExportModelOperationMetadata, + ImportDataOperationMetadata, + OperationMetadata, + UndeployModelOperationMetadata, +) +from .prediction_service import ( + BatchPredictRequest, + BatchPredictResult, + PredictRequest, + PredictResponse, +) +from .ranges import ( + DoubleRange, +) +from .regression import ( + RegressionEvaluationMetrics, +) +from .service import ( + CreateDatasetRequest, + CreateModelRequest, + DeleteDatasetRequest, + DeleteModelRequest, + DeployModelRequest, + ExportDataRequest, + ExportEvaluatedExamplesRequest, + ExportModelRequest, + GetAnnotationSpecRequest, + GetColumnSpecRequest, + GetDatasetRequest, + GetModelEvaluationRequest, + GetModelRequest, + GetTableSpecRequest, + ImportDataRequest, + ListColumnSpecsRequest, + ListColumnSpecsResponse, + ListDatasetsRequest, + ListDatasetsResponse, + ListModelEvaluationsRequest, + ListModelEvaluationsResponse, + ListModelsRequest, + ListModelsResponse, + ListTableSpecsRequest, + ListTableSpecsResponse, + UndeployModelRequest, + UpdateColumnSpecRequest, + UpdateDatasetRequest, + UpdateTableSpecRequest, +) +from .table_spec import ( + TableSpec, +) +from .tables import ( + TablesAnnotation, + TablesDatasetMetadata, + TablesModelColumnInfo, + TablesModelMetadata, +) +from .temporal import ( + TimeSegment, +) +from .text import ( + TextClassificationDatasetMetadata, + TextClassificationModelMetadata, + TextExtractionDatasetMetadata, + TextExtractionModelMetadata, + TextSentimentDatasetMetadata, + TextSentimentModelMetadata, +) +from .text_extraction import ( + TextExtractionAnnotation, + TextExtractionEvaluationMetrics, +) +from .text_segment import ( + TextSegment, +) +from .text_sentiment import ( + TextSentimentAnnotation, + 
TextSentimentEvaluationMetrics, +) +from .translation import ( + TranslationAnnotation, + TranslationDatasetMetadata, + TranslationEvaluationMetrics, + TranslationModelMetadata, +) +from .video import ( + VideoClassificationDatasetMetadata, + VideoClassificationModelMetadata, + VideoObjectTrackingDatasetMetadata, + VideoObjectTrackingModelMetadata, +) + +__all__ = ( + 'AnnotationPayload', + 'AnnotationSpec', + 'ClassificationAnnotation', + 'ClassificationEvaluationMetrics', + 'VideoClassificationAnnotation', + 'ClassificationType', + 'ColumnSpec', + 'Document', + 'DocumentDimensions', + 'ExamplePayload', + 'Image', + 'Row', + 'TextSnippet', + 'ArrayStats', + 'CategoryStats', + 'CorrelationStats', + 'DataStats', + 'Float64Stats', + 'StringStats', + 'StructStats', + 'TimestampStats', + 'DataType', + 'StructType', + 'TypeCode', + 'Dataset', + 'BoundingBoxMetricsEntry', + 'ImageObjectDetectionAnnotation', + 'ImageObjectDetectionEvaluationMetrics', + 'VideoObjectTrackingAnnotation', + 'VideoObjectTrackingEvaluationMetrics', + 'BoundingPoly', + 'NormalizedVertex', + 'ImageClassificationDatasetMetadata', + 'ImageClassificationModelDeploymentMetadata', + 'ImageClassificationModelMetadata', + 'ImageObjectDetectionDatasetMetadata', + 'ImageObjectDetectionModelDeploymentMetadata', + 'ImageObjectDetectionModelMetadata', + 'BatchPredictInputConfig', + 'BatchPredictOutputConfig', + 'BigQueryDestination', + 'BigQuerySource', + 'DocumentInputConfig', + 'ExportEvaluatedExamplesOutputConfig', + 'GcrDestination', + 'GcsDestination', + 'GcsSource', + 'InputConfig', + 'ModelExportOutputConfig', + 'OutputConfig', + 'Model', + 'ModelEvaluation', + 'BatchPredictOperationMetadata', + 'CreateModelOperationMetadata', + 'DeleteOperationMetadata', + 'DeployModelOperationMetadata', + 'ExportDataOperationMetadata', + 'ExportEvaluatedExamplesOperationMetadata', + 'ExportModelOperationMetadata', + 'ImportDataOperationMetadata', + 'OperationMetadata', + 'UndeployModelOperationMetadata', + 'BatchPredictRequest', + 'BatchPredictResult', + 'PredictRequest', + 'PredictResponse', + 'DoubleRange', + 'RegressionEvaluationMetrics', + 'CreateDatasetRequest', + 'CreateModelRequest', + 'DeleteDatasetRequest', + 'DeleteModelRequest', + 'DeployModelRequest', + 'ExportDataRequest', + 'ExportEvaluatedExamplesRequest', + 'ExportModelRequest', + 'GetAnnotationSpecRequest', + 'GetColumnSpecRequest', + 'GetDatasetRequest', + 'GetModelEvaluationRequest', + 'GetModelRequest', + 'GetTableSpecRequest', + 'ImportDataRequest', + 'ListColumnSpecsRequest', + 'ListColumnSpecsResponse', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'ListTableSpecsRequest', + 'ListTableSpecsResponse', + 'UndeployModelRequest', + 'UpdateColumnSpecRequest', + 'UpdateDatasetRequest', + 'UpdateTableSpecRequest', + 'TableSpec', + 'TablesAnnotation', + 'TablesDatasetMetadata', + 'TablesModelColumnInfo', + 'TablesModelMetadata', + 'TimeSegment', + 'TextClassificationDatasetMetadata', + 'TextClassificationModelMetadata', + 'TextExtractionDatasetMetadata', + 'TextExtractionModelMetadata', + 'TextSentimentDatasetMetadata', + 'TextSentimentModelMetadata', + 'TextExtractionAnnotation', + 'TextExtractionEvaluationMetrics', + 'TextSegment', + 'TextSentimentAnnotation', + 'TextSentimentEvaluationMetrics', + 'TranslationAnnotation', + 'TranslationDatasetMetadata', + 'TranslationEvaluationMetrics', + 'TranslationModelMetadata', + 'VideoClassificationDatasetMetadata', 
+ 'VideoClassificationModelMetadata', + 'VideoObjectTrackingDatasetMetadata', + 'VideoObjectTrackingModelMetadata', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_payload.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_payload.py new file mode 100644 index 00000000..6e93ea5a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_payload.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import classification as gca_classification +from google.cloud.automl_v1beta1.types import detection +from google.cloud.automl_v1beta1.types import tables as gca_tables +from google.cloud.automl_v1beta1.types import text_extraction as gca_text_extraction +from google.cloud.automl_v1beta1.types import text_sentiment as gca_text_sentiment +from google.cloud.automl_v1beta1.types import translation as gca_translation + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'AnnotationPayload', + }, +) + + +class AnnotationPayload(proto.Message): + r"""Contains annotation information that is relevant to AutoML. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + translation (google.cloud.automl_v1beta1.types.TranslationAnnotation): + Annotation details for translation. + + This field is a member of `oneof`_ ``detail``. + classification (google.cloud.automl_v1beta1.types.ClassificationAnnotation): + Annotation details for content or image + classification. + + This field is a member of `oneof`_ ``detail``. + image_object_detection (google.cloud.automl_v1beta1.types.ImageObjectDetectionAnnotation): + Annotation details for image object + detection. + + This field is a member of `oneof`_ ``detail``. + video_classification (google.cloud.automl_v1beta1.types.VideoClassificationAnnotation): + Annotation details for video classification. + Returned for Video Classification predictions. + + This field is a member of `oneof`_ ``detail``. + video_object_tracking (google.cloud.automl_v1beta1.types.VideoObjectTrackingAnnotation): + Annotation details for video object tracking. + + This field is a member of `oneof`_ ``detail``. + text_extraction (google.cloud.automl_v1beta1.types.TextExtractionAnnotation): + Annotation details for text extraction. + + This field is a member of `oneof`_ ``detail``. + text_sentiment (google.cloud.automl_v1beta1.types.TextSentimentAnnotation): + Annotation details for text sentiment. + + This field is a member of `oneof`_ ``detail``. 
+ tables (google.cloud.automl_v1beta1.types.TablesAnnotation): + Annotation details for Tables. + + This field is a member of `oneof`_ ``detail``. + annotation_spec_id (str): + Output only . The resource ID of the + annotation spec that this annotation pertains + to. The annotation spec comes from either an + ancestor dataset, or the dataset that was used + to train the model in use. + display_name (str): + Output only. The value of + [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name] + when the model was trained. Because this field returns a + value at model training time, for different models trained + using the same dataset, the returned value could be + different as model owner could update the ``display_name`` + between any two model training. + """ + + translation: gca_translation.TranslationAnnotation = proto.Field( + proto.MESSAGE, + number=2, + oneof='detail', + message=gca_translation.TranslationAnnotation, + ) + classification: gca_classification.ClassificationAnnotation = proto.Field( + proto.MESSAGE, + number=3, + oneof='detail', + message=gca_classification.ClassificationAnnotation, + ) + image_object_detection: detection.ImageObjectDetectionAnnotation = proto.Field( + proto.MESSAGE, + number=4, + oneof='detail', + message=detection.ImageObjectDetectionAnnotation, + ) + video_classification: gca_classification.VideoClassificationAnnotation = proto.Field( + proto.MESSAGE, + number=9, + oneof='detail', + message=gca_classification.VideoClassificationAnnotation, + ) + video_object_tracking: detection.VideoObjectTrackingAnnotation = proto.Field( + proto.MESSAGE, + number=8, + oneof='detail', + message=detection.VideoObjectTrackingAnnotation, + ) + text_extraction: gca_text_extraction.TextExtractionAnnotation = proto.Field( + proto.MESSAGE, + number=6, + oneof='detail', + message=gca_text_extraction.TextExtractionAnnotation, + ) + text_sentiment: gca_text_sentiment.TextSentimentAnnotation = proto.Field( + proto.MESSAGE, + number=7, + oneof='detail', + message=gca_text_sentiment.TextSentimentAnnotation, + ) + tables: gca_tables.TablesAnnotation = proto.Field( + proto.MESSAGE, + number=10, + oneof='detail', + message=gca_tables.TablesAnnotation, + ) + annotation_spec_id: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_spec.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_spec.py new file mode 100644 index 00000000..4cd2e1dc --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_spec.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'AnnotationSpec', + }, +) + + +class AnnotationSpec(proto.Message): + r"""A definition of an annotation spec. + + Attributes: + name (str): + Output only. Resource name of the annotation spec. Form: + + 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}' + display_name (str): + Required. The name of the annotation spec to show in the + interface. The name can be up to 32 characters long and must + match the regexp ``[a-zA-Z0-9_]+``. + example_count (int): + Output only. The number of examples in the + parent dataset labeled by the annotation spec. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + example_count: int = proto.Field( + proto.INT32, + number=9, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/classification.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/classification.py new file mode 100644 index 00000000..82b43c9f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/classification.py @@ -0,0 +1,379 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import temporal + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'ClassificationType', + 'ClassificationAnnotation', + 'VideoClassificationAnnotation', + 'ClassificationEvaluationMetrics', + }, +) + + +class ClassificationType(proto.Enum): + r"""Type of the classification problem. + + Values: + CLASSIFICATION_TYPE_UNSPECIFIED (0): + An un-set value of this enum. + MULTICLASS (1): + At most one label is allowed per example. + MULTILABEL (2): + Multiple labels are allowed for one example. + """ + CLASSIFICATION_TYPE_UNSPECIFIED = 0 + MULTICLASS = 1 + MULTILABEL = 2 + + +class ClassificationAnnotation(proto.Message): + r"""Contains annotation details specific to classification. + + Attributes: + score (float): + Output only. A confidence estimate between + 0.0 and 1.0. A higher value means greater + confidence that the annotation is positive. If a + user approves an annotation as negative or + positive, the score value remains unchanged. If + a user creates an annotation, the score is 0 for + negative or 1 for positive. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + ) + + +class VideoClassificationAnnotation(proto.Message): + r"""Contains annotation details specific to video classification. + + Attributes: + type_ (str): + Output only. Expresses the type of video classification. 
+ Possible values: + + - ``segment`` - Classification done on a specified by user + time segment of a video. AnnotationSpec is answered to be + present in that time segment, if it is present in any + part of it. The video ML model evaluations are done only + for this type of classification. + + - ``shot``- Shot-level classification. AutoML Video + Intelligence determines the boundaries for each camera + shot in the entire segment of the video that user + specified in the request configuration. AutoML Video + Intelligence then returns labels and their confidence + scores for each detected shot, along with the start and + end time of the shot. WARNING: Model evaluation is not + done for this classification type, the quality of it + depends on training data, but there are no metrics + provided to describe that quality. + + - ``1s_interval`` - AutoML Video Intelligence returns + labels and their confidence scores for each second of the + entire segment of the video that user specified in the + request configuration. WARNING: Model evaluation is not + done for this classification type, the quality of it + depends on training data, but there are no metrics + provided to describe that quality. + classification_annotation (google.cloud.automl_v1beta1.types.ClassificationAnnotation): + Output only . The classification details of + this annotation. + time_segment (google.cloud.automl_v1beta1.types.TimeSegment): + Output only . The time segment of the video + to which the annotation applies. + """ + + type_: str = proto.Field( + proto.STRING, + number=1, + ) + classification_annotation: 'ClassificationAnnotation' = proto.Field( + proto.MESSAGE, + number=2, + message='ClassificationAnnotation', + ) + time_segment: temporal.TimeSegment = proto.Field( + proto.MESSAGE, + number=3, + message=temporal.TimeSegment, + ) + + +class ClassificationEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for classification problems. Note: For + Video Classification this metrics only describe quality of the Video + Classification predictions of "segment_classification" type. + + Attributes: + au_prc (float): + Output only. The Area Under Precision-Recall + Curve metric. Micro-averaged for the overall + evaluation. + base_au_prc (float): + Output only. The Area Under Precision-Recall + Curve metric based on priors. Micro-averaged for + the overall evaluation. Deprecated. + au_roc (float): + Output only. The Area Under Receiver + Operating Characteristic curve metric. + Micro-averaged for the overall evaluation. + log_loss (float): + Output only. The Log Loss metric. + confidence_metrics_entry (MutableSequence[google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]): + Output only. Metrics for each confidence_threshold in + 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and + position_threshold = INT32_MAX_VALUE. ROC and + precision-recall curves, and other aggregated metrics are + derived from them. The confidence metrics entries may also + be supplied for additional values of position_threshold, but + from these no aggregated metrics are computed. + confusion_matrix (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix): + Output only. Confusion matrix of the + evaluation. Only set for MULTICLASS + classification problems where number of labels + is no more than 10. + Only set for model level evaluation, not for + evaluation per label. + annotation_spec_id (MutableSequence[str]): + Output only. The annotation spec ids used for + this evaluation. 
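+
+    A common way to consume these metrics is to scan
+    ``confidence_metrics_entry`` for an operating point, for example the
+    confidence threshold that maximizes F1 (a sketch; ``metrics`` is assumed
+    to be a previously fetched ``ClassificationEvaluationMetrics`` message):
+
+    .. code-block:: python
+
+        # Pick the entry with the highest F1 score as the operating threshold.
+        best = max(
+            metrics.confidence_metrics_entry,
+            key=lambda entry: entry.f1_score,
+        )
+        print(best.confidence_threshold, best.precision, best.recall)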
+ """ + + class ConfidenceMetricsEntry(proto.Message): + r"""Metrics for a single confidence threshold. + + Attributes: + confidence_threshold (float): + Output only. Metrics are computed with an + assumption that the model never returns + predictions with score lower than this value. + position_threshold (int): + Output only. Metrics are computed with an assumption that + the model always returns at most this many predictions + (ordered by their score, descendingly), but they all still + need to meet the confidence_threshold. + recall (float): + Output only. Recall (True Positive Rate) for + the given confidence threshold. + precision (float): + Output only. Precision for the given + confidence threshold. + false_positive_rate (float): + Output only. False Positive Rate for the + given confidence threshold. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + recall_at1 (float): + Output only. The Recall (True Positive Rate) + when only considering the label that has the + highest prediction score and not below the + confidence threshold for each example. + precision_at1 (float): + Output only. The precision when only + considering the label that has the highest + prediction score and not below the confidence + threshold for each example. + false_positive_rate_at1 (float): + Output only. The False Positive Rate when + only considering the label that has the highest + prediction score and not below the confidence + threshold for each example. + f1_score_at1 (float): + Output only. The harmonic mean of + [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] + and + [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1]. + true_positive_count (int): + Output only. The number of model created + labels that match a ground truth label. + false_positive_count (int): + Output only. The number of model created + labels that do not match a ground truth label. + false_negative_count (int): + Output only. The number of ground truth + labels that are not matched by a model created + label. + true_negative_count (int): + Output only. The number of labels that were + not created by the model, but if they would, + they would not match a ground truth label. + """ + + confidence_threshold: float = proto.Field( + proto.FLOAT, + number=1, + ) + position_threshold: int = proto.Field( + proto.INT32, + number=14, + ) + recall: float = proto.Field( + proto.FLOAT, + number=2, + ) + precision: float = proto.Field( + proto.FLOAT, + number=3, + ) + false_positive_rate: float = proto.Field( + proto.FLOAT, + number=8, + ) + f1_score: float = proto.Field( + proto.FLOAT, + number=4, + ) + recall_at1: float = proto.Field( + proto.FLOAT, + number=5, + ) + precision_at1: float = proto.Field( + proto.FLOAT, + number=6, + ) + false_positive_rate_at1: float = proto.Field( + proto.FLOAT, + number=9, + ) + f1_score_at1: float = proto.Field( + proto.FLOAT, + number=7, + ) + true_positive_count: int = proto.Field( + proto.INT64, + number=10, + ) + false_positive_count: int = proto.Field( + proto.INT64, + number=11, + ) + false_negative_count: int = proto.Field( + proto.INT64, + number=12, + ) + true_negative_count: int = proto.Field( + proto.INT64, + number=13, + ) + + class ConfusionMatrix(proto.Message): + r"""Confusion matrix of the model running the classification. + + Attributes: + annotation_spec_id (MutableSequence[str]): + Output only. 
IDs of the annotation specs used in the + confusion matrix. For Tables CLASSIFICATION + + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type] + only list of [annotation_spec_display_name-s][] is + populated. + display_name (MutableSequence[str]): + Output only. Display name of the annotation specs used in + the confusion matrix, as they were at the moment of the + evaluation. For Tables CLASSIFICATION + + [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type], + distinct values of the target column at the moment of the + model evaluation are populated here. + row (MutableSequence[google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix.Row]): + Output only. Rows in the confusion matrix. The number of + rows is equal to the size of ``annotation_spec_id``. + ``row[i].example_count[j]`` is the number of examples that + have ground truth of the ``annotation_spec_id[i]`` and are + predicted as ``annotation_spec_id[j]`` by the model being + evaluated. + """ + + class Row(proto.Message): + r"""Output only. A row in the confusion matrix. + + Attributes: + example_count (MutableSequence[int]): + Output only. Value of the specific cell in the confusion + matrix. The number of values each row has (i.e. the length + of the row) is equal to the length of the + ``annotation_spec_id`` field or, if that one is not + populated, length of the + [display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] + field. + """ + + example_count: MutableSequence[int] = proto.RepeatedField( + proto.INT32, + number=1, + ) + + annotation_spec_id: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + display_name: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + row: MutableSequence['ClassificationEvaluationMetrics.ConfusionMatrix.Row'] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='ClassificationEvaluationMetrics.ConfusionMatrix.Row', + ) + + au_prc: float = proto.Field( + proto.FLOAT, + number=1, + ) + base_au_prc: float = proto.Field( + proto.FLOAT, + number=2, + ) + au_roc: float = proto.Field( + proto.FLOAT, + number=6, + ) + log_loss: float = proto.Field( + proto.FLOAT, + number=7, + ) + confidence_metrics_entry: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=ConfidenceMetricsEntry, + ) + confusion_matrix: ConfusionMatrix = proto.Field( + proto.MESSAGE, + number=4, + message=ConfusionMatrix, + ) + annotation_spec_id: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/column_spec.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/column_spec.py new file mode 100644 index 00000000..fc917f3f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/column_spec.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import data_stats as gca_data_stats +from google.cloud.automl_v1beta1.types import data_types + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'ColumnSpec', + }, +) + + +class ColumnSpec(proto.Message): + r"""A representation of a column in a relational table. When listing + them, column specs are returned in the same order in which they were + given on import . Used by: + + - Tables + + Attributes: + name (str): + Output only. The resource name of the column specs. Form: + + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}/columnSpecs/{column_spec_id}`` + data_type (google.cloud.automl_v1beta1.types.DataType): + The data type of elements stored in the + column. + display_name (str): + Output only. The name of the column to show in the + interface. The name can be up to 100 characters long and can + consist only of ASCII Latin letters A-Z and a-z, ASCII + digits 0-9, underscores(_), and forward slashes(/), and must + start with a letter or a digit. + data_stats (google.cloud.automl_v1beta1.types.DataStats): + Output only. Stats of the series of values in the column. + This field may be stale, see the ancestor's + Dataset.tables_dataset_metadata.stats_update_time field for + the timestamp at which these stats were last updated. + top_correlated_columns (MutableSequence[google.cloud.automl_v1beta1.types.ColumnSpec.CorrelatedColumn]): + Deprecated. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + """ + + class CorrelatedColumn(proto.Message): + r"""Identifies the table's column, and its correlation with the + column this ColumnSpec describes. + + Attributes: + column_spec_id (str): + The column_spec_id of the correlated column, which belongs + to the same table as the in-context column. + correlation_stats (google.cloud.automl_v1beta1.types.CorrelationStats): + Correlation between this and the in-context + column. 
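+
+        An illustrative reading sketch, assuming ``corr`` is a populated
+        ``CorrelatedColumn`` (the variable name is hypothetical)::
+
+            # CorrelationStats currently carries the Cramer's V measure.
+            print(corr.column_spec_id, corr.correlation_stats.cramers_v)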
+ """ + + column_spec_id: str = proto.Field( + proto.STRING, + number=1, + ) + correlation_stats: gca_data_stats.CorrelationStats = proto.Field( + proto.MESSAGE, + number=2, + message=gca_data_stats.CorrelationStats, + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + data_type: data_types.DataType = proto.Field( + proto.MESSAGE, + number=2, + message=data_types.DataType, + ) + display_name: str = proto.Field( + proto.STRING, + number=3, + ) + data_stats: gca_data_stats.DataStats = proto.Field( + proto.MESSAGE, + number=4, + message=gca_data_stats.DataStats, + ) + top_correlated_columns: MutableSequence[CorrelatedColumn] = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=CorrelatedColumn, + ) + etag: str = proto.Field( + proto.STRING, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_items.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_items.py new file mode 100644 index 00000000..961dfe80 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_items.py @@ -0,0 +1,398 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import geometry +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import text_segment as gca_text_segment +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'Image', + 'TextSnippet', + 'DocumentDimensions', + 'Document', + 'Row', + 'ExamplePayload', + }, +) + + +class Image(proto.Message): + r"""A representation of an image. + Only images up to 30MB in size are supported. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + image_bytes (bytes): + Image content represented as a stream of bytes. Note: As + with all ``bytes`` fields, protobuffers use a pure binary + representation, whereas JSON representations use base64. + + This field is a member of `oneof`_ ``data``. + input_config (google.cloud.automl_v1beta1.types.InputConfig): + An input config specifying the content of the + image. + + This field is a member of `oneof`_ ``data``. + thumbnail_uri (str): + Output only. HTTP URI to the thumbnail image. 
+ """ + + image_bytes: bytes = proto.Field( + proto.BYTES, + number=1, + oneof='data', + ) + input_config: io.InputConfig = proto.Field( + proto.MESSAGE, + number=6, + oneof='data', + message=io.InputConfig, + ) + thumbnail_uri: str = proto.Field( + proto.STRING, + number=4, + ) + + +class TextSnippet(proto.Message): + r"""A representation of a text snippet. + + Attributes: + content (str): + Required. The content of the text snippet as + a string. Up to 250000 characters long. + mime_type (str): + Optional. The format of + [content][google.cloud.automl.v1beta1.TextSnippet.content]. + Currently the only two allowed values are "text/html" and + "text/plain". If left blank, the format is automatically + determined from the type of the uploaded + [content][google.cloud.automl.v1beta1.TextSnippet.content]. + content_uri (str): + Output only. HTTP URI where you can download + the content. + """ + + content: str = proto.Field( + proto.STRING, + number=1, + ) + mime_type: str = proto.Field( + proto.STRING, + number=2, + ) + content_uri: str = proto.Field( + proto.STRING, + number=4, + ) + + +class DocumentDimensions(proto.Message): + r"""Message that describes dimension of a document. + + Attributes: + unit (google.cloud.automl_v1beta1.types.DocumentDimensions.DocumentDimensionUnit): + Unit of the dimension. + width (float): + Width value of the document, works together + with the unit. + height (float): + Height value of the document, works together + with the unit. + """ + class DocumentDimensionUnit(proto.Enum): + r"""Unit of the document dimension. + + Values: + DOCUMENT_DIMENSION_UNIT_UNSPECIFIED (0): + Should not be used. + INCH (1): + Document dimension is measured in inches. + CENTIMETER (2): + Document dimension is measured in + centimeters. + POINT (3): + Document dimension is measured in points. 72 + points = 1 inch. + """ + DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0 + INCH = 1 + CENTIMETER = 2 + POINT = 3 + + unit: DocumentDimensionUnit = proto.Field( + proto.ENUM, + number=1, + enum=DocumentDimensionUnit, + ) + width: float = proto.Field( + proto.FLOAT, + number=2, + ) + height: float = proto.Field( + proto.FLOAT, + number=3, + ) + + +class Document(proto.Message): + r"""A structured text document e.g. a PDF. + + Attributes: + input_config (google.cloud.automl_v1beta1.types.DocumentInputConfig): + An input config specifying the content of the + document. + document_text (google.cloud.automl_v1beta1.types.TextSnippet): + The plain text version of this document. + layout (MutableSequence[google.cloud.automl_v1beta1.types.Document.Layout]): + Describes the layout of the document. Sorted by + [page_number][]. + document_dimensions (google.cloud.automl_v1beta1.types.DocumentDimensions): + The dimensions of the page in the document. + page_count (int): + Number of pages in the document. + """ + + class Layout(proto.Message): + r"""Describes the layout information of a + [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] + in the document. + + Attributes: + text_segment (google.cloud.automl_v1beta1.types.TextSegment): + Text Segment that represents a segment in + [document_text][google.cloud.automl.v1beta1.Document.document_text]. + page_number (int): + Page number of the + [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] + in the original document, starts from 1. + bounding_poly (google.cloud.automl_v1beta1.types.BoundingPoly): + The position of the + [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] + in the page. 
Contains exactly 4 + + [normalized_vertices][google.cloud.automl.v1beta1.BoundingPoly.normalized_vertices] + and they are connected by edges in the order provided, which + will represent a rectangle parallel to the frame. The + [NormalizedVertex-s][google.cloud.automl.v1beta1.NormalizedVertex] + are relative to the page. Coordinates are based on top-left + as point (0,0). + text_segment_type (google.cloud.automl_v1beta1.types.Document.Layout.TextSegmentType): + The type of the + [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] + in document. + """ + class TextSegmentType(proto.Enum): + r"""The type of TextSegment in the context of the original + document. + + Values: + TEXT_SEGMENT_TYPE_UNSPECIFIED (0): + Should not be used. + TOKEN (1): + The text segment is a token. e.g. word. + PARAGRAPH (2): + The text segment is a paragraph. + FORM_FIELD (3): + The text segment is a form field. + FORM_FIELD_NAME (4): + The text segment is the name part of a form field. It will + be treated as child of another FORM_FIELD TextSegment if its + span is subspan of another TextSegment with type FORM_FIELD. + FORM_FIELD_CONTENTS (5): + The text segment is the text content part of a form field. + It will be treated as child of another FORM_FIELD + TextSegment if its span is subspan of another TextSegment + with type FORM_FIELD. + TABLE (6): + The text segment is a whole table, including + headers, and all rows. + TABLE_HEADER (7): + The text segment is a table's headers. It + will be treated as child of another TABLE + TextSegment if its span is subspan of another + TextSegment with type TABLE. + TABLE_ROW (8): + The text segment is a row in table. It will + be treated as child of another TABLE TextSegment + if its span is subspan of another TextSegment + with type TABLE. + TABLE_CELL (9): + The text segment is a cell in table. It will be treated as + child of another TABLE_ROW TextSegment if its span is + subspan of another TextSegment with type TABLE_ROW. + """ + TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 + TOKEN = 1 + PARAGRAPH = 2 + FORM_FIELD = 3 + FORM_FIELD_NAME = 4 + FORM_FIELD_CONTENTS = 5 + TABLE = 6 + TABLE_HEADER = 7 + TABLE_ROW = 8 + TABLE_CELL = 9 + + text_segment: gca_text_segment.TextSegment = proto.Field( + proto.MESSAGE, + number=1, + message=gca_text_segment.TextSegment, + ) + page_number: int = proto.Field( + proto.INT32, + number=2, + ) + bounding_poly: geometry.BoundingPoly = proto.Field( + proto.MESSAGE, + number=3, + message=geometry.BoundingPoly, + ) + text_segment_type: 'Document.Layout.TextSegmentType' = proto.Field( + proto.ENUM, + number=4, + enum='Document.Layout.TextSegmentType', + ) + + input_config: io.DocumentInputConfig = proto.Field( + proto.MESSAGE, + number=1, + message=io.DocumentInputConfig, + ) + document_text: 'TextSnippet' = proto.Field( + proto.MESSAGE, + number=2, + message='TextSnippet', + ) + layout: MutableSequence[Layout] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Layout, + ) + document_dimensions: 'DocumentDimensions' = proto.Field( + proto.MESSAGE, + number=4, + message='DocumentDimensions', + ) + page_count: int = proto.Field( + proto.INT32, + number=5, + ) + + +class Row(proto.Message): + r"""A representation of a row in a relational table. + + Attributes: + column_spec_ids (MutableSequence[str]): + The resource IDs of the column specs describing the columns + of the row. 
If set must contain, but possibly in a different + order, all input feature + + [column_spec_ids][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] + of the Model this row is being passed to. Note: The below + ``values`` field must match order of this field, if this + field is set. + values (MutableSequence[google.protobuf.struct_pb2.Value]): + Required. The values of the row cells, given in the same + order as the column_spec_ids, or, if not set, then in the + same order as input feature + + [column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] + of the Model this row is being passed to. + """ + + column_spec_ids: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=2, + ) + values: MutableSequence[struct_pb2.Value] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + + +class ExamplePayload(proto.Message): + r"""Example data used for training or prediction. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + image (google.cloud.automl_v1beta1.types.Image): + Example image. + + This field is a member of `oneof`_ ``payload``. + text_snippet (google.cloud.automl_v1beta1.types.TextSnippet): + Example text. + + This field is a member of `oneof`_ ``payload``. + document (google.cloud.automl_v1beta1.types.Document): + Example document. + + This field is a member of `oneof`_ ``payload``. + row (google.cloud.automl_v1beta1.types.Row): + Example relational table row. + + This field is a member of `oneof`_ ``payload``. + """ + + image: 'Image' = proto.Field( + proto.MESSAGE, + number=1, + oneof='payload', + message='Image', + ) + text_snippet: 'TextSnippet' = proto.Field( + proto.MESSAGE, + number=2, + oneof='payload', + message='TextSnippet', + ) + document: 'Document' = proto.Field( + proto.MESSAGE, + number=4, + oneof='payload', + message='Document', + ) + row: 'Row' = proto.Field( + proto.MESSAGE, + number=3, + oneof='payload', + message='Row', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_stats.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_stats.py new file mode 100644 index 00000000..1b5019c3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_stats.py @@ -0,0 +1,361 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'DataStats', + 'Float64Stats', + 'StringStats', + 'TimestampStats', + 'ArrayStats', + 'StructStats', + 'CategoryStats', + 'CorrelationStats', + }, +) + + +class DataStats(proto.Message): + r"""The data statistics of a series of values that share the same + DataType. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + float64_stats (google.cloud.automl_v1beta1.types.Float64Stats): + The statistics for FLOAT64 DataType. + + This field is a member of `oneof`_ ``stats``. + string_stats (google.cloud.automl_v1beta1.types.StringStats): + The statistics for STRING DataType. + + This field is a member of `oneof`_ ``stats``. + timestamp_stats (google.cloud.automl_v1beta1.types.TimestampStats): + The statistics for TIMESTAMP DataType. + + This field is a member of `oneof`_ ``stats``. + array_stats (google.cloud.automl_v1beta1.types.ArrayStats): + The statistics for ARRAY DataType. + + This field is a member of `oneof`_ ``stats``. + struct_stats (google.cloud.automl_v1beta1.types.StructStats): + The statistics for STRUCT DataType. + + This field is a member of `oneof`_ ``stats``. + category_stats (google.cloud.automl_v1beta1.types.CategoryStats): + The statistics for CATEGORY DataType. + + This field is a member of `oneof`_ ``stats``. + distinct_value_count (int): + The number of distinct values. + null_value_count (int): + The number of values that are null. + valid_value_count (int): + The number of values that are valid. + """ + + float64_stats: 'Float64Stats' = proto.Field( + proto.MESSAGE, + number=3, + oneof='stats', + message='Float64Stats', + ) + string_stats: 'StringStats' = proto.Field( + proto.MESSAGE, + number=4, + oneof='stats', + message='StringStats', + ) + timestamp_stats: 'TimestampStats' = proto.Field( + proto.MESSAGE, + number=5, + oneof='stats', + message='TimestampStats', + ) + array_stats: 'ArrayStats' = proto.Field( + proto.MESSAGE, + number=6, + oneof='stats', + message='ArrayStats', + ) + struct_stats: 'StructStats' = proto.Field( + proto.MESSAGE, + number=7, + oneof='stats', + message='StructStats', + ) + category_stats: 'CategoryStats' = proto.Field( + proto.MESSAGE, + number=8, + oneof='stats', + message='CategoryStats', + ) + distinct_value_count: int = proto.Field( + proto.INT64, + number=1, + ) + null_value_count: int = proto.Field( + proto.INT64, + number=2, + ) + valid_value_count: int = proto.Field( + proto.INT64, + number=9, + ) + + +class Float64Stats(proto.Message): + r"""The data statistics of a series of FLOAT64 values. + + Attributes: + mean (float): + The mean of the series. + standard_deviation (float): + The standard deviation of the series. + quantiles (MutableSequence[float]): + Ordered from 0 to k k-quantile values of the data series of + n values. The value at index i is, approximately, the + i*n/k-th smallest value in the series; for i = 0 and i = k + these are, respectively, the min and max values. + histogram_buckets (MutableSequence[google.cloud.automl_v1beta1.types.Float64Stats.HistogramBucket]): + Histogram buckets of the data series. 
Sorted by the min + value of the bucket, ascendingly, and the number of the + buckets is dynamically generated. The buckets are + non-overlapping and completely cover whole FLOAT64 range + with min of first bucket being ``"-Infinity"``, and max of + the last one being ``"Infinity"``. + """ + + class HistogramBucket(proto.Message): + r"""A bucket of a histogram. + + Attributes: + min_ (float): + The minimum value of the bucket, inclusive. + max_ (float): + The maximum value of the bucket, exclusive unless max = + ``"Infinity"``, in which case it's inclusive. + count (int): + The number of data values that are in the + bucket, i.e. are between min and max values. + """ + + min_: float = proto.Field( + proto.DOUBLE, + number=1, + ) + max_: float = proto.Field( + proto.DOUBLE, + number=2, + ) + count: int = proto.Field( + proto.INT64, + number=3, + ) + + mean: float = proto.Field( + proto.DOUBLE, + number=1, + ) + standard_deviation: float = proto.Field( + proto.DOUBLE, + number=2, + ) + quantiles: MutableSequence[float] = proto.RepeatedField( + proto.DOUBLE, + number=3, + ) + histogram_buckets: MutableSequence[HistogramBucket] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=HistogramBucket, + ) + + +class StringStats(proto.Message): + r"""The data statistics of a series of STRING values. + + Attributes: + top_unigram_stats (MutableSequence[google.cloud.automl_v1beta1.types.StringStats.UnigramStats]): + The statistics of the top 20 unigrams, ordered by + [count][google.cloud.automl.v1beta1.StringStats.UnigramStats.count]. + """ + + class UnigramStats(proto.Message): + r"""The statistics of a unigram. + + Attributes: + value (str): + The unigram. + count (int): + The number of occurrences of this unigram in + the series. + """ + + value: str = proto.Field( + proto.STRING, + number=1, + ) + count: int = proto.Field( + proto.INT64, + number=2, + ) + + top_unigram_stats: MutableSequence[UnigramStats] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=UnigramStats, + ) + + +class TimestampStats(proto.Message): + r"""The data statistics of a series of TIMESTAMP values. + + Attributes: + granular_stats (MutableMapping[str, google.cloud.automl_v1beta1.types.TimestampStats.GranularStats]): + The string key is the pre-defined granularity. Currently + supported: hour_of_day, day_of_week, month_of_year. + Granularities finer that the granularity of timestamp data + are not populated (e.g. if timestamps are at day + granularity, then hour_of_day is not populated). + """ + + class GranularStats(proto.Message): + r"""Stats split by a defined in context granularity. + + Attributes: + buckets (MutableMapping[int, int]): + A map from granularity key to example count for that key. + E.g. for hour_of_day ``13`` means 1pm, or for month_of_year + ``5`` means May). + """ + + buckets: MutableMapping[int, int] = proto.MapField( + proto.INT32, + proto.INT64, + number=1, + ) + + granular_stats: MutableMapping[str, GranularStats] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message=GranularStats, + ) + + +class ArrayStats(proto.Message): + r"""The data statistics of a series of ARRAY values. + + Attributes: + member_stats (google.cloud.automl_v1beta1.types.DataStats): + Stats of all the values of all arrays, as if + they were a single long series of data. The type + depends on the element type of the array. 
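+
+    An illustrative sketch, assuming ``stats`` is a ``DataStats`` for a
+    column of ARRAY type (the variable name is hypothetical)::
+
+        # member_stats is itself a DataStats describing the array elements.
+        element_stats = stats.array_stats.member_stats
+        print(element_stats.distinct_value_count, element_stats.null_value_count)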
+ """ + + member_stats: 'DataStats' = proto.Field( + proto.MESSAGE, + number=2, + message='DataStats', + ) + + +class StructStats(proto.Message): + r"""The data statistics of a series of STRUCT values. + + Attributes: + field_stats (MutableMapping[str, google.cloud.automl_v1beta1.types.DataStats]): + Map from a field name of the struct to data + stats aggregated over series of all data in that + field across all the structs. + """ + + field_stats: MutableMapping[str, 'DataStats'] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message='DataStats', + ) + + +class CategoryStats(proto.Message): + r"""The data statistics of a series of CATEGORY values. + + Attributes: + top_category_stats (MutableSequence[google.cloud.automl_v1beta1.types.CategoryStats.SingleCategoryStats]): + The statistics of the top 20 CATEGORY values, ordered by + + [count][google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats.count]. + """ + + class SingleCategoryStats(proto.Message): + r"""The statistics of a single CATEGORY value. + + Attributes: + value (str): + The CATEGORY value. + count (int): + The number of occurrences of this value in + the series. + """ + + value: str = proto.Field( + proto.STRING, + number=1, + ) + count: int = proto.Field( + proto.INT64, + number=2, + ) + + top_category_stats: MutableSequence[SingleCategoryStats] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=SingleCategoryStats, + ) + + +class CorrelationStats(proto.Message): + r"""A correlation statistics between two series of DataType + values. The series may have differing DataType-s, but within a + single series the DataType must be the same. + + Attributes: + cramers_v (float): + The correlation value using the Cramer's V + measure. + """ + + cramers_v: float = proto.Field( + proto.DOUBLE, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_types.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_types.py new file mode 100644 index 00000000..528fcfed --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_types.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'TypeCode', + 'DataType', + 'StructType', + }, +) + + +class TypeCode(proto.Enum): + r"""``TypeCode`` is used as a part of + [DataType][google.cloud.automl.v1beta1.DataType]. + + Values: + TYPE_CODE_UNSPECIFIED (0): + Not specified. Should not be used. + FLOAT64 (3): + Encoded as ``number``, or the strings ``"NaN"``, + ``"Infinity"``, or ``"-Infinity"``. + TIMESTAMP (4): + Must be between 0AD and 9999AD. 
Encoded as ``string`` + according to + [time_format][google.cloud.automl.v1beta1.DataType.time_format], + or, if that format is not set, then in RFC 3339 + ``date-time`` format, where ``time-offset`` = ``"Z"`` (e.g. + 1985-04-12T23:20:50.52Z). + STRING (6): + Encoded as ``string``. + ARRAY (8): + Encoded as ``list``, where the list elements are represented + according to + + [list_element_type][google.cloud.automl.v1beta1.DataType.list_element_type]. + STRUCT (9): + Encoded as ``struct``, where field values are represented + according to + [struct_type][google.cloud.automl.v1beta1.DataType.struct_type]. + CATEGORY (10): + Values of this type are not further understood by AutoML, + e.g. AutoML is unable to tell the order of values (as it + could with FLOAT64), or is unable to say if one value + contains another (as it could with STRING). Encoded as + ``string`` (bytes should be base64-encoded, as described in + RFC 4648, section 4). + """ + TYPE_CODE_UNSPECIFIED = 0 + FLOAT64 = 3 + TIMESTAMP = 4 + STRING = 6 + ARRAY = 8 + STRUCT = 9 + CATEGORY = 10 + + +class DataType(proto.Message): + r"""Indicated the type of data that can be stored in a structured + data entity (e.g. a table). + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + list_element_type (google.cloud.automl_v1beta1.types.DataType): + If + [type_code][google.cloud.automl.v1beta1.DataType.type_code] + == [ARRAY][google.cloud.automl.v1beta1.TypeCode.ARRAY], then + ``list_element_type`` is the type of the elements. + + This field is a member of `oneof`_ ``details``. + struct_type (google.cloud.automl_v1beta1.types.StructType): + If + [type_code][google.cloud.automl.v1beta1.DataType.type_code] + == [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT], + then ``struct_type`` provides type information for the + struct's fields. + + This field is a member of `oneof`_ ``details``. + time_format (str): + If + [type_code][google.cloud.automl.v1beta1.DataType.type_code] + == + [TIMESTAMP][google.cloud.automl.v1beta1.TypeCode.TIMESTAMP] + then ``time_format`` provides the format in which that time + field is expressed. The time_format must either be one of: + + - ``UNIX_SECONDS`` + - ``UNIX_MILLISECONDS`` + - ``UNIX_MICROSECONDS`` + - ``UNIX_NANOSECONDS`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. If + time_format is not set, then the default format as + described on the type_code is used. + + This field is a member of `oneof`_ ``details``. + type_code (google.cloud.automl_v1beta1.types.TypeCode): + Required. The + [TypeCode][google.cloud.automl.v1beta1.TypeCode] for this + type. + nullable (bool): + If true, this DataType can also be ``NULL``. In .CSV files + ``NULL`` value is expressed as an empty string. 
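+
+    A construction sketch for a nullable TIMESTAMP column type (the
+    chosen time format is just one of the options listed above)::
+
+        from google.cloud import automl_v1beta1
+
+        col_type = automl_v1beta1.DataType(
+            type_code=automl_v1beta1.TypeCode.TIMESTAMP,
+            time_format="UNIX_SECONDS",
+            nullable=True,
+        )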
+ """ + + list_element_type: 'DataType' = proto.Field( + proto.MESSAGE, + number=2, + oneof='details', + message='DataType', + ) + struct_type: 'StructType' = proto.Field( + proto.MESSAGE, + number=3, + oneof='details', + message='StructType', + ) + time_format: str = proto.Field( + proto.STRING, + number=5, + oneof='details', + ) + type_code: 'TypeCode' = proto.Field( + proto.ENUM, + number=1, + enum='TypeCode', + ) + nullable: bool = proto.Field( + proto.BOOL, + number=4, + ) + + +class StructType(proto.Message): + r"""``StructType`` defines the DataType-s of a + [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type. + + Attributes: + fields (MutableMapping[str, google.cloud.automl_v1beta1.types.DataType]): + Unordered map of struct field names to their + data types. Fields cannot be added or removed + via Update. Their names and data types are still + mutable. + """ + + fields: MutableMapping[str, 'DataType'] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message='DataType', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/dataset.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/dataset.py new file mode 100644 index 00000000..8aa67e0e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/dataset.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import image +from google.cloud.automl_v1beta1.types import tables +from google.cloud.automl_v1beta1.types import text +from google.cloud.automl_v1beta1.types import translation +from google.cloud.automl_v1beta1.types import video +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'Dataset', + }, +) + + +class Dataset(proto.Message): + r"""A workspace for solving a single, particular machine learning + (ML) problem. A workspace contains examples that may be + annotated. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + translation_dataset_metadata (google.cloud.automl_v1beta1.types.TranslationDatasetMetadata): + Metadata for a dataset used for translation. + + This field is a member of `oneof`_ ``dataset_metadata``. + image_classification_dataset_metadata (google.cloud.automl_v1beta1.types.ImageClassificationDatasetMetadata): + Metadata for a dataset used for image + classification. + + This field is a member of `oneof`_ ``dataset_metadata``. 
+ text_classification_dataset_metadata (google.cloud.automl_v1beta1.types.TextClassificationDatasetMetadata): + Metadata for a dataset used for text + classification. + + This field is a member of `oneof`_ ``dataset_metadata``. + image_object_detection_dataset_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionDatasetMetadata): + Metadata for a dataset used for image object + detection. + + This field is a member of `oneof`_ ``dataset_metadata``. + video_classification_dataset_metadata (google.cloud.automl_v1beta1.types.VideoClassificationDatasetMetadata): + Metadata for a dataset used for video + classification. + + This field is a member of `oneof`_ ``dataset_metadata``. + video_object_tracking_dataset_metadata (google.cloud.automl_v1beta1.types.VideoObjectTrackingDatasetMetadata): + Metadata for a dataset used for video object + tracking. + + This field is a member of `oneof`_ ``dataset_metadata``. + text_extraction_dataset_metadata (google.cloud.automl_v1beta1.types.TextExtractionDatasetMetadata): + Metadata for a dataset used for text + extraction. + + This field is a member of `oneof`_ ``dataset_metadata``. + text_sentiment_dataset_metadata (google.cloud.automl_v1beta1.types.TextSentimentDatasetMetadata): + Metadata for a dataset used for text + sentiment. + + This field is a member of `oneof`_ ``dataset_metadata``. + tables_dataset_metadata (google.cloud.automl_v1beta1.types.TablesDatasetMetadata): + Metadata for a dataset used for Tables. + + This field is a member of `oneof`_ ``dataset_metadata``. + name (str): + Output only. The resource name of the dataset. Form: + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`` + display_name (str): + Required. The name of the dataset to show in the interface. + The name can be up to 32 characters long and can consist + only of ASCII Latin letters A-Z and a-z, underscores (_), + and ASCII digits 0-9. + description (str): + User-provided description of the dataset. The + description can be up to 25000 characters long. + example_count (int): + Output only. The number of examples in the + dataset. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this dataset was + created. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. 
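+
+    A minimal construction sketch (the display name and language codes
+    are placeholder values)::
+
+        from google.cloud import automl_v1beta1
+
+        dataset = automl_v1beta1.Dataset(
+            display_name="my_translation_dataset",
+            translation_dataset_metadata=automl_v1beta1.TranslationDatasetMetadata(
+                source_language_code="en",
+                target_language_code="es",
+            ),
+        )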
+ """ + + translation_dataset_metadata: translation.TranslationDatasetMetadata = proto.Field( + proto.MESSAGE, + number=23, + oneof='dataset_metadata', + message=translation.TranslationDatasetMetadata, + ) + image_classification_dataset_metadata: image.ImageClassificationDatasetMetadata = proto.Field( + proto.MESSAGE, + number=24, + oneof='dataset_metadata', + message=image.ImageClassificationDatasetMetadata, + ) + text_classification_dataset_metadata: text.TextClassificationDatasetMetadata = proto.Field( + proto.MESSAGE, + number=25, + oneof='dataset_metadata', + message=text.TextClassificationDatasetMetadata, + ) + image_object_detection_dataset_metadata: image.ImageObjectDetectionDatasetMetadata = proto.Field( + proto.MESSAGE, + number=26, + oneof='dataset_metadata', + message=image.ImageObjectDetectionDatasetMetadata, + ) + video_classification_dataset_metadata: video.VideoClassificationDatasetMetadata = proto.Field( + proto.MESSAGE, + number=31, + oneof='dataset_metadata', + message=video.VideoClassificationDatasetMetadata, + ) + video_object_tracking_dataset_metadata: video.VideoObjectTrackingDatasetMetadata = proto.Field( + proto.MESSAGE, + number=29, + oneof='dataset_metadata', + message=video.VideoObjectTrackingDatasetMetadata, + ) + text_extraction_dataset_metadata: text.TextExtractionDatasetMetadata = proto.Field( + proto.MESSAGE, + number=28, + oneof='dataset_metadata', + message=text.TextExtractionDatasetMetadata, + ) + text_sentiment_dataset_metadata: text.TextSentimentDatasetMetadata = proto.Field( + proto.MESSAGE, + number=30, + oneof='dataset_metadata', + message=text.TextSentimentDatasetMetadata, + ) + tables_dataset_metadata: tables.TablesDatasetMetadata = proto.Field( + proto.MESSAGE, + number=33, + oneof='dataset_metadata', + message=tables.TablesDatasetMetadata, + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + description: str = proto.Field( + proto.STRING, + number=3, + ) + example_count: int = proto.Field( + proto.INT32, + number=21, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + etag: str = proto.Field( + proto.STRING, + number=17, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/detection.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/detection.py new file mode 100644 index 00000000..19dfde32 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/detection.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import geometry +from google.protobuf import duration_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'ImageObjectDetectionAnnotation', + 'VideoObjectTrackingAnnotation', + 'BoundingBoxMetricsEntry', + 'ImageObjectDetectionEvaluationMetrics', + 'VideoObjectTrackingEvaluationMetrics', + }, +) + + +class ImageObjectDetectionAnnotation(proto.Message): + r"""Annotation details for image object detection. + + Attributes: + bounding_box (google.cloud.automl_v1beta1.types.BoundingPoly): + Output only. The rectangle representing the + object location. + score (float): + Output only. The confidence that this annotation is positive + for the parent example, value in [0, 1], higher means higher + positivity confidence. + """ + + bounding_box: geometry.BoundingPoly = proto.Field( + proto.MESSAGE, + number=1, + message=geometry.BoundingPoly, + ) + score: float = proto.Field( + proto.FLOAT, + number=2, + ) + + +class VideoObjectTrackingAnnotation(proto.Message): + r"""Annotation details for video object tracking. + + Attributes: + instance_id (str): + Optional. The instance of the object, + expressed as a positive integer. Used to tell + apart objects of the same type (i.e. + AnnotationSpec) when multiple are present on a + single example. + NOTE: Instance ID prediction quality is not a + part of model evaluation and is done as best + effort. Especially in cases when an entity goes + off-screen for a longer time (minutes), when it + comes back it may be given a new instance ID. + time_offset (google.protobuf.duration_pb2.Duration): + Required. A time (frame) of a video to which + this annotation pertains. Represented as the + duration since the video's start. + bounding_box (google.cloud.automl_v1beta1.types.BoundingPoly): + Required. The rectangle representing the object location on + the frame (i.e. at the time_offset of the video). + score (float): + Output only. The confidence that this annotation is positive + for the video at the time_offset, value in [0, 1], higher + means higher positivity confidence. For annotations created + by the user the score is 1. When user approves an + annotation, the original float score is kept (and not + changed to 1). + """ + + instance_id: str = proto.Field( + proto.STRING, + number=1, + ) + time_offset: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + bounding_box: geometry.BoundingPoly = proto.Field( + proto.MESSAGE, + number=3, + message=geometry.BoundingPoly, + ) + score: float = proto.Field( + proto.FLOAT, + number=4, + ) + + +class BoundingBoxMetricsEntry(proto.Message): + r"""Bounding box matching model metrics for a single + intersection-over-union threshold and multiple label match + confidence thresholds. + + Attributes: + iou_threshold (float): + Output only. The intersection-over-union + threshold value used to compute this metrics + entry. + mean_average_precision (float): + Output only. The mean average precision, most often close to + au_prc. + confidence_metrics_entries (MutableSequence[google.cloud.automl_v1beta1.types.BoundingBoxMetricsEntry.ConfidenceMetricsEntry]): + Output only. Metrics for each label-match + confidence_threshold from + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall + curve is derived from them. 
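+
+    An illustrative reading sketch, assuming ``entry`` is a populated
+    ``BoundingBoxMetricsEntry`` (the variable name is hypothetical)::
+
+        print("IoU threshold:", entry.iou_threshold)
+        for cme in entry.confidence_metrics_entries:
+            print(cme.confidence_threshold, cme.precision, cme.recall, cme.f1_score)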
+ """ + + class ConfidenceMetricsEntry(proto.Message): + r"""Metrics for a single confidence threshold. + + Attributes: + confidence_threshold (float): + Output only. The confidence threshold value + used to compute the metrics. + recall (float): + Output only. Recall under the given + confidence threshold. + precision (float): + Output only. Precision under the given + confidence threshold. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + """ + + confidence_threshold: float = proto.Field( + proto.FLOAT, + number=1, + ) + recall: float = proto.Field( + proto.FLOAT, + number=2, + ) + precision: float = proto.Field( + proto.FLOAT, + number=3, + ) + f1_score: float = proto.Field( + proto.FLOAT, + number=4, + ) + + iou_threshold: float = proto.Field( + proto.FLOAT, + number=1, + ) + mean_average_precision: float = proto.Field( + proto.FLOAT, + number=2, + ) + confidence_metrics_entries: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=ConfidenceMetricsEntry, + ) + + +class ImageObjectDetectionEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for image object detection problems. + Evaluates prediction quality of labeled bounding boxes. + + Attributes: + evaluated_bounding_box_count (int): + Output only. The total number of bounding + boxes (i.e. summed over all images) the ground + truth used to create this evaluation had. + bounding_box_metrics_entries (MutableSequence[google.cloud.automl_v1beta1.types.BoundingBoxMetricsEntry]): + Output only. The bounding boxes match metrics + for each Intersection-over-union threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each + label confidence threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. + bounding_box_mean_average_precision (float): + Output only. The single metric for bounding boxes + evaluation: the mean_average_precision averaged over all + bounding_box_metrics_entries. + """ + + evaluated_bounding_box_count: int = proto.Field( + proto.INT32, + number=1, + ) + bounding_box_metrics_entries: MutableSequence['BoundingBoxMetricsEntry'] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='BoundingBoxMetricsEntry', + ) + bounding_box_mean_average_precision: float = proto.Field( + proto.FLOAT, + number=3, + ) + + +class VideoObjectTrackingEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for video object tracking problems. + Evaluates prediction quality of both labeled bounding boxes and + labeled tracks (i.e. series of bounding boxes sharing same label + and instance ID). + + Attributes: + evaluated_frame_count (int): + Output only. The number of video frames used + to create this evaluation. + evaluated_bounding_box_count (int): + Output only. The total number of bounding + boxes (i.e. summed over all frames) the ground + truth used to create this evaluation had. + bounding_box_metrics_entries (MutableSequence[google.cloud.automl_v1beta1.types.BoundingBoxMetricsEntry]): + Output only. The bounding boxes match metrics + for each Intersection-over-union threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each + label confidence threshold + 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. + bounding_box_mean_average_precision (float): + Output only. The single metric for bounding boxes + evaluation: the mean_average_precision averaged over all + bounding_box_metrics_entries. 
+ """ + + evaluated_frame_count: int = proto.Field( + proto.INT32, + number=1, + ) + evaluated_bounding_box_count: int = proto.Field( + proto.INT32, + number=2, + ) + bounding_box_metrics_entries: MutableSequence['BoundingBoxMetricsEntry'] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='BoundingBoxMetricsEntry', + ) + bounding_box_mean_average_precision: float = proto.Field( + proto.FLOAT, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/geometry.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/geometry.py new file mode 100644 index 00000000..9474a410 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/geometry.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'NormalizedVertex', + 'BoundingPoly', + }, +) + + +class NormalizedVertex(proto.Message): + r"""A vertex represents a 2D point in the image. + The normalized vertex coordinates are between 0 to 1 fractions + relative to the original plane (image, video). E.g. if the plane + (e.g. whole image) would have size 10 x 20 then a point with + normalized coordinates (0.1, 0.3) would be at the position (1, + 6) on that plane. + + Attributes: + x (float): + Required. Horizontal coordinate. + y (float): + Required. Vertical coordinate. + """ + + x: float = proto.Field( + proto.FLOAT, + number=1, + ) + y: float = proto.Field( + proto.FLOAT, + number=2, + ) + + +class BoundingPoly(proto.Message): + r"""A bounding polygon of a detected object on a plane. On output both + vertices and normalized_vertices are provided. The polygon is formed + by connecting vertices in the order they are listed. + + Attributes: + normalized_vertices (MutableSequence[google.cloud.automl_v1beta1.types.NormalizedVertex]): + Output only . The bounding polygon normalized + vertices. + """ + + normalized_vertices: MutableSequence['NormalizedVertex'] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='NormalizedVertex', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/image.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/image.py new file mode 100644 index 00000000..59c066b2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/image.py @@ -0,0 +1,304 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import classification + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'ImageClassificationDatasetMetadata', + 'ImageObjectDetectionDatasetMetadata', + 'ImageClassificationModelMetadata', + 'ImageObjectDetectionModelMetadata', + 'ImageClassificationModelDeploymentMetadata', + 'ImageObjectDetectionModelDeploymentMetadata', + }, +) + + +class ImageClassificationDatasetMetadata(proto.Message): + r"""Dataset metadata that is specific to image classification. + + Attributes: + classification_type (google.cloud.automl_v1beta1.types.ClassificationType): + Required. Type of the classification problem. + """ + + classification_type: classification.ClassificationType = proto.Field( + proto.ENUM, + number=1, + enum=classification.ClassificationType, + ) + + +class ImageObjectDetectionDatasetMetadata(proto.Message): + r"""Dataset metadata specific to image object detection. + """ + + +class ImageClassificationModelMetadata(proto.Message): + r"""Model metadata for image classification. + + Attributes: + base_model_id (str): + Optional. The ID of the ``base`` model. If it is specified, + the new model will be created based on the ``base`` model. + Otherwise, the new model will be created from scratch. The + ``base`` model must be in the same ``project`` and + ``location`` as the new model to create, and have the same + ``model_type``. + train_budget (int): + Required. The train budget of creating this model, expressed + in hours. The actual ``train_cost`` will be equal or less + than this value. + train_cost (int): + Output only. The actual train cost of creating this model, + expressed in hours. If this model is created from a ``base`` + model, the train cost used to create the ``base`` model are + not included. + stop_reason (str): + Output only. The reason that this create model operation + stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. + model_type (str): + Optional. Type of the model. The available values are: + + - ``cloud`` - Model to be used via prediction calls to + AutoML API. This is the default value. + - ``mobile-low-latency-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have low latency, but may have + lower prediction quality than other models. + - ``mobile-versatile-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. 
+ - ``mobile-high-accuracy-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have a higher latency, but should + also have a higher prediction quality than other models. + - ``mobile-core-ml-low-latency-1`` - A model that, in + addition to providing prediction via AutoML API, can also + be exported (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile device with Core ML afterwards. + Expected to have low latency, but may have lower + prediction quality than other models. + - ``mobile-core-ml-versatile-1`` - A model that, in + addition to providing prediction via AutoML API, can also + be exported (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile device with Core ML afterwards. + - ``mobile-core-ml-high-accuracy-1`` - A model that, in + addition to providing prediction via AutoML API, can also + be exported (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile device with Core ML afterwards. + Expected to have a higher latency, but should also have a + higher prediction quality than other models. + node_qps (float): + Output only. An approximate number of online + prediction QPS that can be supported by this + model per each node on which it is deployed. + node_count (int): + Output only. The number of nodes this model is deployed on. + A node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the node_qps field. + """ + + base_model_id: str = proto.Field( + proto.STRING, + number=1, + ) + train_budget: int = proto.Field( + proto.INT64, + number=2, + ) + train_cost: int = proto.Field( + proto.INT64, + number=3, + ) + stop_reason: str = proto.Field( + proto.STRING, + number=5, + ) + model_type: str = proto.Field( + proto.STRING, + number=7, + ) + node_qps: float = proto.Field( + proto.DOUBLE, + number=13, + ) + node_count: int = proto.Field( + proto.INT64, + number=14, + ) + + +class ImageObjectDetectionModelMetadata(proto.Message): + r"""Model metadata specific to image object detection. + + Attributes: + model_type (str): + Optional. Type of the model. The available values are: + + - ``cloud-high-accuracy-1`` - (default) A model to be used + via prediction calls to AutoML API. Expected to have a + higher latency, but should also have a higher prediction + quality than other models. + - ``cloud-low-latency-1`` - A model to be used via + prediction calls to AutoML API. Expected to have low + latency, but may have lower prediction quality than other + models. + - ``mobile-low-latency-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have low latency, but may have + lower prediction quality than other models. + - ``mobile-versatile-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. 
+ - ``mobile-high-accuracy-1`` - A model that, in addition to + providing prediction via AutoML API, can also be exported + (see + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. Expected to have a higher latency, but should + also have a higher prediction quality than other models. + node_count (int): + Output only. The number of nodes this model is deployed on. + A node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the qps_per_node + field. + node_qps (float): + Output only. An approximate number of online + prediction QPS that can be supported by this + model per each node on which it is deployed. + stop_reason (str): + Output only. The reason that this create model operation + stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. + train_budget_milli_node_hours (int): + The train budget of creating this model, expressed in milli + node hours i.e. 1,000 value in this field means 1 node hour. + The actual ``train_cost`` will be equal or less than this + value. If further model training ceases to provide any + improvements, it will stop without using full budget and the + stop_reason will be ``MODEL_CONVERGED``. Note, node_hour = + actual_hour \* number_of_nodes_invovled. For model type + ``cloud-high-accuracy-1``\ (default) and + ``cloud-low-latency-1``, the train budget must be between + 20,000 and 900,000 milli node hours, inclusive. The default + value is 216, 000 which represents one day in wall time. For + model type ``mobile-low-latency-1``, ``mobile-versatile-1``, + ``mobile-high-accuracy-1``, + ``mobile-core-ml-low-latency-1``, + ``mobile-core-ml-versatile-1``, + ``mobile-core-ml-high-accuracy-1``, the train budget must be + between 1,000 and 100,000 milli node hours, inclusive. The + default value is 24, 000 which represents one day in wall + time. + train_cost_milli_node_hours (int): + Output only. The actual train cost of + creating this model, expressed in milli node + hours, i.e. 1,000 value in this field means 1 + node hour. Guaranteed to not exceed the train + budget. + """ + + model_type: str = proto.Field( + proto.STRING, + number=1, + ) + node_count: int = proto.Field( + proto.INT64, + number=3, + ) + node_qps: float = proto.Field( + proto.DOUBLE, + number=4, + ) + stop_reason: str = proto.Field( + proto.STRING, + number=5, + ) + train_budget_milli_node_hours: int = proto.Field( + proto.INT64, + number=6, + ) + train_cost_milli_node_hours: int = proto.Field( + proto.INT64, + number=7, + ) + + +class ImageClassificationModelDeploymentMetadata(proto.Message): + r"""Model deployment metadata specific to Image Classification. + + Attributes: + node_count (int): + Input only. The number of nodes to deploy the model on. A + node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the model's + + [node_qps][google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_qps]. + Must be between 1 and 100, inclusive on both ends. + """ + + node_count: int = proto.Field( + proto.INT64, + number=1, + ) + + +class ImageObjectDetectionModelDeploymentMetadata(proto.Message): + r"""Model deployment metadata specific to Image Object Detection. + + Attributes: + node_count (int): + Input only. The number of nodes to deploy the model on. 
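+            For example, a minimal sketch of requesting a two-node
+            deployment (illustrative only; it assumes the generated types
+            are re-exported from ``google.cloud.automl_v1beta1``)::
+
+                from google.cloud import automl_v1beta1
+
+                # Cap the deployment at 2 nodes (the allowed range is 1 to 100).
+                deployment_metadata = (
+                    automl_v1beta1.ImageObjectDetectionModelDeploymentMetadata(
+                        node_count=2,
+                    )
+                )
+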
A + node is an abstraction of a machine resource, which can + handle online prediction QPS as given in the model's + + [qps_per_node][google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.qps_per_node]. + Must be between 1 and 100, inclusive on both ends. + """ + + node_count: int = proto.Field( + proto.INT64, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/io.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/io.py new file mode 100644 index 00000000..b156bc51 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/io.py @@ -0,0 +1,1253 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'InputConfig', + 'BatchPredictInputConfig', + 'DocumentInputConfig', + 'OutputConfig', + 'BatchPredictOutputConfig', + 'ModelExportOutputConfig', + 'ExportEvaluatedExamplesOutputConfig', + 'GcsSource', + 'BigQuerySource', + 'GcsDestination', + 'BigQueryDestination', + 'GcrDestination', + }, +) + + +class InputConfig(proto.Message): + r"""Input configuration for ImportData Action. + + The format of input depends on dataset_metadata the Dataset into + which the import is happening has. As input source the + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is + expected, unless specified otherwise. Additionally any input .CSV + file by itself must be 100MB or smaller, unless specified otherwise. + If an "example" file (that is, image, video etc.) with identical + content (even if it had different GCS_FILE_PATH) is mentioned + multiple times, then its label, bounding boxes etc. are appended. + The same file should be always provided with the same ML_USE and + GCS_FILE_PATH, if it is not, then these values are + nondeterministically selected from the given ones. + + The formats are represented in EBNF with commas being literal and + with non-terminal symbols defined near the end of this comment. The + formats are: + + - For Image Classification: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH,LABEL,LABEL,... GCS_FILE_PATH leads to image + of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, + .WEBP, .BMP, .TIFF, .ICO For MULTICLASS classification type, at + most one LABEL is allowed per image. If an image has not yet been + labeled, then it should be mentioned just once with no LABEL. + Some sample rows: TRAIN,gs://folder/image1.jpg,daisy + TEST,gs://folder/image2.jpg,dandelion,tulip,rose + UNASSIGNED,gs://folder/image3.jpg,daisy + UNASSIGNED,gs://folder/image4.jpg + + - For Image Object Detection: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX \| ,,,,,,,) + GCS_FILE_PATH leads to image of up to 30MB in size. 
Supported + extensions: .JPEG, .GIF, .PNG. Each image is assumed to be + exhaustively labeled. The minimum allowed BOUNDING_BOX edge + length is 0.01, and no more than 500 BOUNDING_BOX-es per image + are allowed (one BOUNDING_BOX is defined per line). If an image + has not yet been labeled, then it should be mentioned just once + with no LABEL and the ",,,,,,," in place of the BOUNDING_BOX. For + images which are known to not contain any bounding boxes, they + should be labelled explictly as "NEGATIVE_IMAGE", followed by + ",,,,,,," in place of the BOUNDING_BOX. Sample rows: + TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, + TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, + UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 + TEST,gs://folder/im3.png,,,,,,,,, + TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, + + - For Video Classification: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be + used. The GCS_FILE_PATH should lead to another .csv file which + describes examples that have given ML_USE, using the following + row format: + GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END \| ,,) + Here GCS_FILE_PATH leads to a video of up to 50GB in size and up + to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length + of the video, and end has to be after the start. Any segment of a + video which has one or more labels on it, is considered a hard + negative for all other labels. Any segment with no labels on it + is considered to be unknown. If a whole video is unknown, then it + shuold be mentioned just once with ",," in place of LABEL, + TIME_SEGMENT_START,TIME_SEGMENT_END. Sample top level CSV file: + TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file + for a particular ML_USE: + gs://folder/video1.avi,car,120,180.000021 + gs://folder/video1.avi,bike,150,180.000021 + gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, + + - For Video Object Tracking: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be + used. The GCS_FILE_PATH should lead to another .csv file which + describes examples that have given ML_USE, using one of the + following row format: + GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or + GCS_FILE_PATH,,,,,,,,,, Here GCS_FILE_PATH leads to a video of up + to 50GB in size and up to 3h duration. Supported extensions: + .MOV, .MPEG4, .MP4, .AVI. Providing INSTANCE_IDs can help to + obtain a better model. When a specific labeled entity leaves the + video frame, and shows up afterwards it is not required, albeit + preferable, that the same INSTANCE_ID is given to it. TIMESTAMP + must be within the length of the video, the BOUNDING_BOX is + assumed to be drawn on the closest video's frame to the + TIMESTAMP. Any mentioned by the TIMESTAMP frame is expected to be + exhaustively labeled and no more than 500 BOUNDING_BOX-es per + frame are allowed. If a whole video is unknown, then it should be + mentioned just once with ",,,,,,,,,," in place of LABEL, + [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. 
Sample top level CSV file: + TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a + CSV file for a particular ML_USE: + gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 + gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 + gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 + gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, + gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, + gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, + gs://folder/video2.avi,,,,,,,,,,, + + - For Text Extraction: CSV file(s) with each line in format: + ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .JSONL (that is, + JSON Lines) file which either imports text in-line or as + documents. Any given .JSONL file must be 100MB or smaller. The + in-line .JSONL file contains, per line, a proto that wraps a + TextSnippet proto (in json representation) followed by one or + more AnnotationPayload protos (called annotations), which have + display_name and text_extraction detail populated. The given text + is expected to be annotated exhaustively, for example, if you + look for animals and text contains "dolphin" that is not labeled, + then "dolphin" is assumed to not be an animal. Any given text + snippet content must be 10KB or smaller, and also be UTF-8 NFC + encoded (ASCII already is). The document .JSONL file contains, + per line, a proto that wraps a Document proto. The Document proto + must have either document_text or input_config set. In + document_text case, the Document proto may also contain the + spatial information of the document, including layout, document + dimension and page number. In input_config case, only PDF + documents are supported now, and each document may be up to 2MB + large. Currently, annotations on documents cannot be specified at + import. Three sample CSV rows: TRAIN,gs://folder/file1.jsonl + VALIDATE,gs://folder/file2.jsonl TEST,gs://folder/file3.jsonl + Sample in-line JSON Lines file for entity extraction (presented + here with artificial line breaks, but the only actual line break + is denoted by \\n).: { "document": { "document_text": {"content": + "dog cat"} "layout": [ { "text_segment": { "start_offset": 0, + "end_offset": 3, }, "page_number": 1, "bounding_poly": { + "normalized_vertices": [ {"x": 0.1, "y": 0.1}, {"x": 0.1, "y": + 0.3}, {"x": 0.3, "y": 0.3}, {"x": 0.3, "y": 0.1}, ], }, + "text_segment_type": TOKEN, }, { "text_segment": { + "start_offset": 4, "end_offset": 7, }, "page_number": 1, + "bounding_poly": { "normalized_vertices": [ {"x": 0.4, "y": 0.1}, + {"x": 0.4, "y": 0.3}, {"x": 0.8, "y": 0.3}, {"x": 0.8, "y": 0.1}, + ], }, "text_segment_type": TOKEN, } + + :: + + ], + "document_dimensions": { + "width": 8.27, + "height": 11.69, + "unit": INCH, + } + "page_count": 1, + }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": {"text_segment": {"start_offset": 0, + "end_offset": 3}} + }, + { + "display_name": "animal", + "text_extraction": {"text_segment": {"start_offset": 4, + "end_offset": 7}} + } + ], + }\n + { + "text_snippet": { + "content": "This dog is good." 
+ }, + "annotations": [ + { + "display_name": "animal", + "text_extraction": { + "text_segment": {"start_offset": 5, "end_offset": 8} + } + } + ] + } + Sample document JSON Lines file (presented here with artificial line + breaks, but the only actual line break is denoted by \n).: + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] + } + } + } + }\n + { + "document": { + "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] + } + } + } + } + + - For Text Classification: CSV file(s) with each line in format: + ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),LABEL,LABEL,... + TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If + the column content is a valid gcs file path, i.e. prefixed by + "gs://", it will be treated as a GCS_FILE_PATH, else if the + content is enclosed within double quotes (""), it is treated as a + TEXT_SNIPPET. In the GCS_FILE_PATH case, the path must lead to a + .txt file with UTF-8 encoding, for example, + "gs://folder/content.txt", and the content in it is extracted as + a text snippet. In TEXT_SNIPPET case, the column content + excluding quotes is treated as to be imported text snippet. In + both cases, the text snippet/file size must be within 128kB. + Maximum 100 unique labels are allowed per CSV row. Sample rows: + TRAIN,"They have bad food and very rude",RudeService,BadFood + TRAIN,gs://folder/content.txt,SlowService TEST,"Typically always + bad service there.",RudeService VALIDATE,"Stomach ache to + go.",BadFood + + - For Text Sentiment: CSV file(s) with each line in format: + ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),SENTIMENT TEXT_SNIPPET and + GCS_FILE_PATH are distinguished by a pattern. If the column + content is a valid gcs file path, that is, prefixed by "gs://", + it is treated as a GCS_FILE_PATH, otherwise it is treated as a + TEXT_SNIPPET. In the GCS_FILE_PATH case, the path must lead to a + .txt file with UTF-8 encoding, for example, + "gs://folder/content.txt", and the content in it is extracted as + a text snippet. In TEXT_SNIPPET case, the column content itself + is treated as to be imported text snippet. In both cases, the + text snippet must be up to 500 characters long. Sample rows: + TRAIN,"@freewrytin this is way too good for your product",2 + TRAIN,"I need this product so bad",3 TEST,"Thank you for this + product.",4 VALIDATE,gs://folder/content.txt,2 + + - For Tables: Either + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] + or + + [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source] + can be used. All inputs is concatenated into a single + + [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name] + For gcs_source: CSV file(s), where the first row of the first file + is the header, containing unique column names. If the first row of a + subsequent file is the same as the header, then it is also treated + as a header. All other rows contain values for the corresponding + columns. Each .CSV file by itself must be 10GB or smaller, and their + total size must be 100GB or smaller. 
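+      A rough construction sketch for such a GCS-based Tables import
+      (illustrative only; it assumes the generated types are re-exported
+      from ``google.cloud.automl_v1beta1``)::
+
+          from google.cloud import automl_v1beta1
+
+          # Import Tables data from CSV files in Cloud Storage.
+          input_config = automl_v1beta1.InputConfig(
+              gcs_source=automl_v1beta1.GcsSource(
+                  input_uris=["gs://my-bucket/tables/data.csv"],
+              ),
+              params={"schema_inference_version": "1"},
+          )
+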
First three sample rows of a + CSV file: "Id","First Name","Last Name","Dob","Addresses" + + "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" + + "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} + For bigquery_source: An URI of a BigQuery table. The user data size + of the BigQuery table must be 100GB or smaller. An imported table + must have between 2 and 1,000 columns, inclusive, and between 1000 + and 100,000,000 rows, inclusive. There are at most 5 import data + running in parallel. Definitions: ML_USE = "TRAIN" \| "VALIDATE" \| + "TEST" \| "UNASSIGNED" Describes how the given example (file) should + be used for model training. "UNASSIGNED" can be used when user has + no preference. GCS_FILE_PATH = A path to file on GCS, e.g. + "gs://folder/image1.png". LABEL = A display name of an object on an + image, video etc., e.g. "dog". Must be up to 32 characters long and + can consist only of ASCII Latin letters A-Z and a-z, underscores(_), + and ASCII digits 0-9. For each label an AnnotationSpec is created + which display_name becomes the label; AnnotationSpecs are given back + in predictions. INSTANCE_ID = A positive integer that identifies a + specific instance of a labeled entity on an example. Used e.g. to + track two cars on a video while being able to tell apart which one + is which. BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX \| + VERTEX,,,VERTEX,, A rectangle parallel to the frame of the example + (image, video). If 4 vertices are given they are connected by edges + in the order provided, if 2 are given they are recognized as + diagonally opposite vertices of the rectangle. VERTEX = + COORDINATE,COORDINATE First coordinate is horizontal (x), the second + is vertical (y). COORDINATE = A float in 0 to 1 range, relative to + total length of image or video in given dimension. For fractions the + leading non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is + in top left. TIME_SEGMENT_START = TIME_OFFSET Expresses a beginning, + inclusive, of a time segment within an example that has a time + dimension (e.g. video). TIME_SEGMENT_END = TIME_OFFSET Expresses an + end, exclusive, of a time segment within an example that has a time + dimension (e.g. video). TIME_OFFSET = A number of seconds as + measured from the start of an example (e.g. video). Fractions are + allowed, up to a microsecond precision. "inf" is allowed, and it + means the end of the example. TEXT_SNIPPET = A content of a text + snippet, UTF-8 encoded, enclosed within double quotes (""). + SENTIMENT = An integer between 0 and + Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). + Describes the ordinal of the sentiment - higher value means a more + positive sentiment. All the values are completely relative, i.e. + neither 0 needs to mean a negative or neutral sentiment nor + sentiment_max needs to mean a positive one - it is just required + that 0 is the least positive sentiment in the data, and + sentiment_max is the most positive one. The SENTIMENT shouldn't be + confused with "score" or "magnitude" from the previous Natural + Language Sentiment Analysis API. 
All SENTIMENT values between 0 and + sentiment_max must be represented in the imported data. On + prediction the same 0 to sentiment_max range will be used. The + difference between neighboring sentiment values needs not to be + uniform, e.g. 1 and 2 may be similar whereas the difference between + 2 and 3 may be huge. + + Errors: If any of the provided CSV files can't be parsed or if more + than certain percent of CSV rows cannot be processed then the + operation fails and nothing is imported. Regardless of overall + success or failure the per-row failures, up to a certain count cap, + is listed in Operation.metadata.partial_failures. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_source (google.cloud.automl_v1beta1.types.GcsSource): + The Google Cloud Storage location for the input content. In + ImportData, the gcs_source points to a csv with structure + described in the comment. + + This field is a member of `oneof`_ ``source``. + bigquery_source (google.cloud.automl_v1beta1.types.BigQuerySource): + The BigQuery location for the input content. + + This field is a member of `oneof`_ ``source``. + params (MutableMapping[str, str]): + Additional domain-specific parameters describing the + semantic of the imported data, any string must be up to + 25000 characters long. + + - For Tables: ``schema_inference_version`` - (integer) + Required. The version of the algorithm that should be + used for the initial inference of the schema (columns' + DataTypes) of the table the data is being imported into. + Allowed values: "1". + """ + + gcs_source: 'GcsSource' = proto.Field( + proto.MESSAGE, + number=1, + oneof='source', + message='GcsSource', + ) + bigquery_source: 'BigQuerySource' = proto.Field( + proto.MESSAGE, + number=3, + oneof='source', + message='BigQuerySource', + ) + params: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + + +class BatchPredictInputConfig(proto.Message): + r"""Input configuration for BatchPredict Action. + + The format of input depends on the ML problem of the model used for + prediction. As input source the + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is + expected, unless specified otherwise. + + The formats are represented in EBNF with commas being literal and + with non-terminal symbols defined near the end of this comment. The + formats are: + + - For Image Classification: CSV file(s) with each line having just + a single column: GCS_FILE_PATH which leads to image of up to 30MB + in size. Supported extensions: .JPEG, .GIF, .PNG. This path is + treated as the ID in the Batch predict output. Three sample rows: + gs://folder/image1.jpeg gs://folder/image2.gif + gs://folder/image3.png + + - For Image Object Detection: CSV file(s) with each line having + just a single column: GCS_FILE_PATH which leads to image of up to + 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. This path + is treated as the ID in the Batch predict output. 
Three sample + rows: gs://folder/image1.jpeg gs://folder/image2.gif + gs://folder/image3.png + + - For Video Classification: CSV file(s) with each line in format: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH + leads to video of up to 50GB in size and up to 3h duration. + Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length + of the video, and end has to be after the start. Three sample + rows: gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60 + gs://folder/vid2.mov,0,inf + + - For Video Object Tracking: CSV file(s) with each line in format: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH + leads to video of up to 50GB in size and up to 3h duration. + Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length + of the video, and end has to be after the start. Three sample + rows: gs://folder/video1.mp4,10,240 + gs://folder/video1.mp4,300,360 gs://folder/vid2.mov,0,inf + + - For Text Classification: CSV file(s) with each line having just a + single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file + can have size upto 128kB. Any given text snippet content must + have 60,000 characters or less. Three sample rows: + gs://folder/text1.txt "Some text content to predict" + gs://folder/text3.pdf Supported file extensions: .txt, .pdf + + - For Text Sentiment: CSV file(s) with each line having just a + single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file + can have size upto 128kB. Any given text snippet content must + have 500 characters or less. Three sample rows: + gs://folder/text1.txt "Some text content to predict" + gs://folder/text3.pdf Supported file extensions: .txt, .pdf + + - For Text Extraction .JSONL (i.e. JSON Lines) file(s) which either + provide text in-line or as documents (for a single BatchPredict + call only one of the these formats may be used). The in-line + .JSONL file(s) contain per line a proto that wraps a temporary + user-assigned TextSnippet ID (string up to 2000 characters long) + called "id", a TextSnippet proto (in json representation) and + zero or more TextFeature protos. Any given text snippet content + must have 30,000 characters or less, and also be UTF-8 NFC + encoded (ASCII already is). The IDs provided should be unique. + The document .JSONL file(s) contain, per line, a proto that wraps + a Document proto with input_config set. Only PDF documents are + supported now, and each document must be up to 2MB large. Any + given .JSONL file must be 100MB or smaller, and no more than 20 + files may be given. 
Sample in-line JSON Lines file (presented + here with artificial line breaks, but the only actual line break + is denoted by \\n): { "id": "my_first_id", "text_snippet": { + "content": "dog car cat"}, "text_features": [ { "text_segment": + {"start_offset": 4, "end_offset": 6}, "structural_type": + PARAGRAPH, "bounding_poly": { "normalized_vertices": [ {"x": 0.1, + "y": 0.1}, {"x": 0.1, "y": 0.3}, {"x": 0.3, "y": 0.3}, {"x": 0.3, + "y": 0.1}, ] }, } ], }\n { "id": "2", "text_snippet": { + "content": "An elaborate content", "mime_type": "text/plain" } } + Sample document JSON Lines file (presented here with artificial + line breaks, but the only actual line break is denoted by \\n).: + { "document": { "input_config": { "gcs_source": { "input_uris": [ + "gs://folder/document1.pdf" ] } } } }\n { "document": { + "input_config": { "gcs_source": { "input_uris": [ + "gs://folder/document2.pdf" ] } } } } + + - For Tables: Either + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] + or + + [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]. + GCS case: CSV file(s), each by itself 10GB or smaller and total size + must be 100GB or smaller, where first file must have a header + containing column names. If the first row of a subsequent file is + the same as the header, then it is also treated as a header. All + other rows contain values for the corresponding columns. The column + names must contain the model's + + [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] + + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + (order doesn't matter). The columns corresponding to the model's + input feature column specs must contain values compatible with the + column spec's data types. Prediction on all the rows, i.e. the CSV + lines, will be attempted. For FORECASTING + + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: + all columns having + + [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] + type will be ignored. First three sample rows of a CSV file: "First + Name","Last Name","Dob","Addresses" + + "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" + + "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} + BigQuery case: An URI of a BigQuery table. The user data size of the + BigQuery table must be 100GB or smaller. The column names must + contain the model's + + [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] + + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + (order doesn't matter). The columns corresponding to the model's + input feature column specs must contain values compatible with the + column spec's data types. Prediction on all the rows of the table + will be attempted. 
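+      A minimal construction sketch for the BigQuery case (illustrative
+      only; it assumes the generated types are re-exported from
+      ``google.cloud.automl_v1beta1``)::
+
+          from google.cloud import automl_v1beta1
+
+          # Batch prediction input read from a BigQuery table.
+          input_config = automl_v1beta1.BatchPredictInputConfig(
+              bigquery_source=automl_v1beta1.BigQuerySource(
+                  input_uri="bq://my-project.my_dataset.my_table",
+              ),
+          )
+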
For FORECASTING + + [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: + all columns having + + [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] + type will be ignored. + + Definitions: GCS_FILE_PATH = A path to file on GCS, e.g. + "gs://folder/video.avi". TEXT_SNIPPET = A content of a text snippet, + UTF-8 encoded, enclosed within double quotes ("") TIME_SEGMENT_START + = TIME_OFFSET Expresses a beginning, inclusive, of a time segment + within an example that has a time dimension (e.g. video). + TIME_SEGMENT_END = TIME_OFFSET Expresses an end, exclusive, of a + time segment within an example that has a time dimension (e.g. + video). TIME_OFFSET = A number of seconds as measured from the start + of an example (e.g. video). Fractions are allowed, up to a + microsecond precision. "inf" is allowed and it means the end of the + example. + + Errors: If any of the provided CSV files can't be parsed or if more + than certain percent of CSV rows cannot be processed then the + operation fails and prediction does not happen. Regardless of + overall success or failure the per-row failures, up to a certain + count cap, will be listed in Operation.metadata.partial_failures. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_source (google.cloud.automl_v1beta1.types.GcsSource): + The Google Cloud Storage location for the + input content. + + This field is a member of `oneof`_ ``source``. + bigquery_source (google.cloud.automl_v1beta1.types.BigQuerySource): + The BigQuery location for the input content. + + This field is a member of `oneof`_ ``source``. + """ + + gcs_source: 'GcsSource' = proto.Field( + proto.MESSAGE, + number=1, + oneof='source', + message='GcsSource', + ) + bigquery_source: 'BigQuerySource' = proto.Field( + proto.MESSAGE, + number=2, + oneof='source', + message='BigQuerySource', + ) + + +class DocumentInputConfig(proto.Message): + r"""Input configuration of a + [Document][google.cloud.automl.v1beta1.Document]. + + Attributes: + gcs_source (google.cloud.automl_v1beta1.types.GcsSource): + The Google Cloud Storage location of the + document file. Only a single path should be + given. Max supported size: 512MB. + Supported extensions: .PDF. + """ + + gcs_source: 'GcsSource' = proto.Field( + proto.MESSAGE, + number=1, + message='GcsSource', + ) + + +class OutputConfig(proto.Message): + r"""- For Translation: CSV file ``translation.csv``, with each line in + format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file + which describes examples that have given ML_USE, using the + following row format per line: TEXT_SNIPPET (in source language) + \\t TEXT_SNIPPET (in target language) + + - For Tables: Output depends on whether the dataset was imported + from GCS or BigQuery. GCS case: + + [gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destination] + must be set. Exported are CSV file(s) ``tables_1.csv``, + ``tables_2.csv``,...,\ ``tables_N.csv`` with each having as header + line the table's column names, and all other lines contain values + for the header columns. 
BigQuery case: + + [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] + pointing to a BigQuery project must be set. In the given project a + new dataset will be created with name + + ``export_data__`` + where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will be + in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that + dataset a new table called ``primary_table`` will be created, and + filled with precisely the same data as this obtained on import. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination): + The Google Cloud Storage location where the output is to be + written to. For Image Object Detection, Text Extraction, + Video Classification and Tables, in the given directory a + new directory will be created with name: export_data-- where + timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. + All export output will be written into that directory. + + This field is a member of `oneof`_ ``destination``. + bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination): + The BigQuery location where the output is to + be written to. + + This field is a member of `oneof`_ ``destination``. + """ + + gcs_destination: 'GcsDestination' = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message='GcsDestination', + ) + bigquery_destination: 'BigQueryDestination' = proto.Field( + proto.MESSAGE, + number=2, + oneof='destination', + message='BigQueryDestination', + ) + + +class BatchPredictOutputConfig(proto.Message): + r"""Output configuration for BatchPredict Action. + + As destination the + + [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] + must be set unless specified otherwise for a domain. If + gcs_destination is set then in the given directory a new directory + is created. Its name will be "prediction--", where timestamp is in + YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends + on the ML problem the predictions are made for. + + - For Image Classification: In the created directory files + ``image_classification_1.jsonl``, + ``image_classification_2.jsonl``,...,\ ``image_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of the successfully predicted images and annotations. A + single image will be listed only once with all its annotations, + and its annotations will never be split across files. Each .JSONL + file will contain, per line, a JSON representation of a proto + that wraps image's "ID" : "" followed by a list of zero + or more AnnotationPayload protos (called annotations), which have + classification detail populated. If prediction for any image + failed (partially or completely), then an additional + ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` + files will be created (N depends on total number of failed + predictions). 
These files will have a JSON representation of a + proto that wraps the same "ID" : "" but here followed + by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``\ fields. + + - For Image Object Detection: In the created directory files + ``image_object_detection_1.jsonl``, + ``image_object_detection_2.jsonl``,...,\ ``image_object_detection_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of the successfully predicted images and annotations. Each + .JSONL file will contain, per line, a JSON representation of a + proto that wraps image's "ID" : "" followed by a list + of zero or more AnnotationPayload protos (called annotations), + which have image_object_detection detail populated. A single + image will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for + any image failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` + files will be created (N depends on total number of failed + predictions). These files will have a JSON representation of a + proto that wraps the same "ID" : "" but here followed + by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``\ fields. + + - For Video Classification: In the created directory a + video_classification.csv file, and a .JSON file per each video + classification requested in the input (i.e. each line in given + CSV(s)), will be created. + + :: + + The format of video_classification.csv is: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 + to 1 the prediction input lines (i.e. video_classification.csv has + precisely the same number of lines as the prediction input had.) + JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. STATUS = + "OK" if prediction completed successfully, or an error code with + message otherwise. If STATUS is not "OK" then the .JSON file for + that line may not exist or be empty. + + :: + + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for the video time segment the file is assigned to in the + video_classification.csv. All AnnotationPayload protos will have + video_classification field set, and will be sorted by + video_classification.type field (note that the returned types are + governed by `classifaction_types` parameter in + [PredictService.BatchPredictRequest.params][]). + + - For Video Object Tracking: In the created directory a + video_object_tracking.csv file will be created, and multiple + files video_object_trackinng_1.json, + video_object_trackinng_2.json,..., video_object_trackinng_N.json, + where N is the number of requests in the input (i.e. the number + of lines in given CSV(s)). + + :: + + The format of video_object_tracking.csv is: + + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS + where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 + to 1 the prediction input lines (i.e. video_object_tracking.csv has + precisely the same number of lines as the prediction input had.) 
+ JSON_FILE_NAME = Name of .JSON file in the output directory, which + contains prediction responses for the video time segment. STATUS = + "OK" if prediction completed successfully, or an error code with + message otherwise. If STATUS is not "OK" then the .JSON file for + that line may not exist or be empty. + + :: + + Each .JSON file, assuming STATUS is "OK", will contain a list of + AnnotationPayload protos in JSON format, which are the predictions + for each frame of the video time segment the file is assigned to in + video_object_tracking.csv. All AnnotationPayload protos will have + video_object_tracking field set. + + - For Text Classification: In the created directory files + ``text_classification_1.jsonl``, + ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of inputs and annotations found. + + :: + + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text snippet or input text file and a list of + zero or more AnnotationPayload protos (called annotations), which + have classification detail populated. A single text snippet or file + will be listed only once with all its annotations, and its + annotations will never be split across files. + + If prediction for any text snippet or file failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input text snippet or input text file followed by + exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``. + + - For Text Sentiment: In the created directory files + ``text_sentiment_1.jsonl``, + ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will + be created, where N may be 1, and depends on the total number of + inputs and annotations found. + + :: + + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text snippet or input text file and a list of + zero or more AnnotationPayload protos (called annotations), which + have text_sentiment detail populated. A single text snippet or file + will be listed only once with all its annotations, and its + annotations will never be split across files. + + If prediction for any text snippet or file failed (partially or + completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input text snippet or input text file followed by + exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``. + + - For Text Extraction: In the created directory files + ``text_extraction_1.jsonl``, + ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` + will be created, where N may be 1, and depends on the total + number of inputs and annotations found. The contents of these + .JSONL file(s) depend on whether the input used inline text, or + documents. 
If input was inline, then each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + given in request text snippet's "id" (if specified), followed by + input text snippet, and a list of zero or more AnnotationPayload + protos (called annotations), which have text_extraction detail + populated. A single text snippet will be listed only once with + all its annotations, and its annotations will never be split + across files. If input used documents, then each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + given in request document proto, followed by its OCR-ed + representation in the form of a text snippet, finally followed by + a list of zero or more AnnotationPayload protos (called + annotations), which have text_extraction detail populated and + refer, via their indices, to the OCR-ed text snippet. A single + document (and its text snippet) will be listed only once with all + its annotations, and its annotations will never be split across + files. If prediction for any text snippet failed (partially or + completely), then additional ``errors_1.jsonl``, + ``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created + (N depends on total number of failed predictions). These files + will have a JSON representation of a proto that wraps either the + "id" : "" (in case of inline) or the document proto (in + case of document) but here followed by exactly one + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + containing only ``code`` and ``message``. + + - For Tables: Output depends on whether + + [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] + or + + [bigquery_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination] + is set (either is allowed). GCS case: In the created directory files + ``tables_1.csv``, ``tables_2.csv``,..., ``tables_N.csv`` will be + created, where N may be 1, and depends on the total number of the + successfully predicted rows. For all CLASSIFICATION + + [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns' + + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + given on input followed by M target column names in the format of + + "<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>\_\_score" + where M is the number of distinct target values, i.e. number of + distinct values in the target column of the table used to train the + model. Subsequent lines will contain the respective values of + successfully predicted rows, with the last, i.e. the target, columns + having the corresponding prediction + [scores][google.cloud.automl.v1beta1.TablesAnnotation.score]. 
For + REGRESSION and FORECASTING + + [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns' + [display_name-s][google.cloud.automl.v1beta1.display_name] given on + input followed by the predicted target column with name in the + format of + + "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" + Subsequent lines will contain the respective values of successfully + predicted rows, with the last, i.e. the target, column having the + predicted target value. If prediction for any rows failed, then an + additional ``errors_1.csv``, ``errors_2.csv``,..., ``errors_N.csv`` + will be created (N depends on total number of failed rows). These + files will have analogous format as ``tables_*.csv``, but always + with a single target column having + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + represented as a JSON string, and containing only ``code`` and + ``message``. BigQuery case: + + [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] + pointing to a BigQuery project must be set. In the given project a + new dataset will be created with name + ``prediction__`` + where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will be + in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the + dataset two tables will be created, ``predictions``, and ``errors``. + The ``predictions`` table's column names will be the input columns' + + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + followed by the target column with name in the format of + + "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" + The input feature columns will contain the respective values of + successfully predicted rows, with the target column having an ARRAY + of + + [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], + represented as STRUCT-s, containing + [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. + The ``errors`` table contains rows for which the prediction has + failed, it has analogous input columns while the target column name + is in the format of + + "errors_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>", + and as a value has + + [``google.rpc.Status``](https: + //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) + represented as a STRUCT, and containing only ``code`` and + ``message``. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination): + The Google Cloud Storage location of the + directory where the output is to be written to. + + This field is a member of `oneof`_ ``destination``. 
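+
+            A minimal construction sketch (illustrative only; it assumes
+            the generated types are re-exported from
+            ``google.cloud.automl_v1beta1``)::
+
+                from google.cloud import automl_v1beta1
+
+                # Write batch prediction results under a Cloud Storage prefix;
+                # the service creates a timestamped "prediction-..." directory
+                # beneath it, as described above.
+                output_config = automl_v1beta1.BatchPredictOutputConfig(
+                    gcs_destination=automl_v1beta1.GcsDestination(
+                        output_uri_prefix="gs://my-bucket/batch-predict/",
+                    ),
+                )
+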
+ bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination): + The BigQuery location where the output is to + be written to. + + This field is a member of `oneof`_ ``destination``. + """ + + gcs_destination: 'GcsDestination' = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message='GcsDestination', + ) + bigquery_destination: 'BigQueryDestination' = proto.Field( + proto.MESSAGE, + number=2, + oneof='destination', + message='BigQueryDestination', + ) + + +class ModelExportOutputConfig(proto.Message): + r"""Output configuration for ModelExport Action. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination): + The Google Cloud Storage location where the model is to be + written to. This location may only be set for the following + model formats: "tflite", "edgetpu_tflite", "tf_saved_model", + "tf_js", "core_ml". + + Under the directory given as the destination a new one with + name "model-export--", where timestamp is in + YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. + Inside the model and any of its supporting files will be + written. + + This field is a member of `oneof`_ ``destination``. + gcr_destination (google.cloud.automl_v1beta1.types.GcrDestination): + The GCR location where model image is to be + pushed to. This location may only be set for the + following model formats: + + "docker". + + The model image will be created under the given + URI. + + This field is a member of `oneof`_ ``destination``. + model_format (str): + The format in which the model must be exported. The + available, and default, formats depend on the problem and + model type (if given problem and type combination doesn't + have a format listed, it means its models are not + exportable): + + - For Image Classification mobile-low-latency-1, + mobile-versatile-1, mobile-high-accuracy-1: "tflite" + (default), "edgetpu_tflite", "tf_saved_model", "tf_js", + "docker". + + - For Image Classification mobile-core-ml-low-latency-1, + mobile-core-ml-versatile-1, + mobile-core-ml-high-accuracy-1: "core_ml" (default). + + - For Image Object Detection mobile-low-latency-1, + mobile-versatile-1, mobile-high-accuracy-1: "tflite", + "tf_saved_model", "tf_js". + + - For Video Classification cloud, "tf_saved_model". + + - For Video Object Tracking cloud, "tf_saved_model". + + - For Video Object Tracking mobile-versatile-1: "tflite", + "edgetpu_tflite", "tf_saved_model", "docker". + + - For Video Object Tracking mobile-coral-versatile-1: + "tflite", "edgetpu_tflite", "docker". + + - For Video Object Tracking mobile-coral-low-latency-1: + "tflite", "edgetpu_tflite", "docker". + + - For Video Object Tracking mobile-jetson-versatile-1: + "tf_saved_model", "docker". + + - For Tables: "docker". + + Formats description: + + - tflite - Used for Android mobile devices. + - edgetpu_tflite - Used for `Edge + TPU `__ devices. + - tf_saved_model - A tensorflow model in SavedModel format. + - tf_js - A + `TensorFlow.js `__ model + that can be used in the browser and in Node.js using + JavaScript. + - docker - Used for Docker containers. Use the params field + to customize the container. 
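+           For example, a sketch of a Docker export (illustrative only; it
+           assumes the generated types are re-exported from
+           ``google.cloud.automl_v1beta1``)::
+
+               from google.cloud import automl_v1beta1
+
+               # Export a model as a Docker container image pushed to
+               # Container Registry.
+               export_config = automl_v1beta1.ModelExportOutputConfig(
+                   model_format="docker",
+                   gcr_destination=automl_v1beta1.GcrDestination(
+                       output_uri="gcr.io/my-project/my-exported-model:latest",
+                   ),
+                   params={"cpu_architecture": "x86_64"},
+               )
+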
The container is verified to + work correctly on ubuntu 16.04 operating system. See more + at [containers + + quickstart](https: + //cloud.google.com/vision/automl/docs/containers-gcs-quickstart) + + - core_ml - Used for iOS mobile devices. + params (MutableMapping[str, str]): + Additional model-type and format specific parameters + describing the requirements for the to be exported model + files, any string must be up to 25000 characters long. + + - For ``docker`` format: ``cpu_architecture`` - (string) + "x86_64" (default). ``gpu_architecture`` - (string) + "none" (default), "nvidia". + """ + + gcs_destination: 'GcsDestination' = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message='GcsDestination', + ) + gcr_destination: 'GcrDestination' = proto.Field( + proto.MESSAGE, + number=3, + oneof='destination', + message='GcrDestination', + ) + model_format: str = proto.Field( + proto.STRING, + number=4, + ) + params: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + + +class ExportEvaluatedExamplesOutputConfig(proto.Message): + r"""Output configuration for ExportEvaluatedExamples Action. Note that + this call is available only for 30 days since the moment the model + was evaluated. The output depends on the domain, as follows (note + that only examples from the TEST set are exported): + + - For Tables: + + [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] + pointing to a BigQuery project must be set. In the given project a + new dataset will be created with name + + ``export_evaluated_examples__`` + where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will be + in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the + dataset an ``evaluated_examples`` table will be created. It will + have all the same columns as the + + [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id] + of the [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from + which the model was created, as they were at the moment of model's + evaluation (this includes the target column with its ground truth), + followed by a column called "predicted_". That last + column will contain the model's prediction result for each + respective row, given as ARRAY of + [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], + represented as STRUCT-s, containing + [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination): + The BigQuery location where the output is to + be written to. + + This field is a member of `oneof`_ ``destination``. + """ + + bigquery_destination: 'BigQueryDestination' = proto.Field( + proto.MESSAGE, + number=2, + oneof='destination', + message='BigQueryDestination', + ) + + +class GcsSource(proto.Message): + r"""The Google Cloud Storage location for the input content. + + Attributes: + input_uris (MutableSequence[str]): + Required. Google Cloud Storage URIs to input files, up to + 2000 characters long. Accepted forms: + + - Full object path, e.g. gs://bucket/directory/object.csv + """ + + input_uris: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class BigQuerySource(proto.Message): + r"""The BigQuery location for the input content. 
+
+    Attributes:
+        input_uri (str):
+            Required. BigQuery URI to a table, up to 2000 characters
+            long. Accepted forms:
+
+            -  BigQuery path e.g. bq://projectId.bqDatasetId.bqTableId
+    """
+
+    input_uri: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class GcsDestination(proto.Message):
+    r"""The Google Cloud Storage location where the output is to be
+    written to.
+
+    Attributes:
+        output_uri_prefix (str):
+            Required. Google Cloud Storage URI to output directory, up
+            to 2000 characters long. Accepted forms:
+
+            -  Prefix path: gs://bucket/directory The requesting user
+               must have write permission to the bucket. The directory
+               is created if it doesn't exist.
+    """
+
+    output_uri_prefix: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class BigQueryDestination(proto.Message):
+    r"""The BigQuery location for the output content.
+
+    Attributes:
+        output_uri (str):
+            Required. BigQuery URI to a project, up to 2000 characters
+            long. Accepted forms:
+
+            -  BigQuery path e.g. bq://projectId
+    """
+
+    output_uri: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class GcrDestination(proto.Message):
+    r"""The GCR location where the image must be pushed to.
+
+    Attributes:
+        output_uri (str):
+            Required. Google Container Registry URI of the new image, up
+            to 2000 characters long. See
+            https://cloud.google.com/container-registry/docs/pushing-and-pulling#pushing_an_image_to_a_registry
+            Accepted forms:
+
+            -  [HOSTNAME]/[PROJECT-ID]/[IMAGE]
+            -  [HOSTNAME]/[PROJECT-ID]/[IMAGE]:[TAG]
+
+            The requesting user must have permission to push images to
+            the project.
+    """
+
+    output_uri: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model.py
new file mode 100644
index 00000000..f83543cb
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model.py
@@ -0,0 +1,208 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto  # type: ignore
+
+from google.cloud.automl_v1beta1.types import image
+from google.cloud.automl_v1beta1.types import tables
+from google.cloud.automl_v1beta1.types import text
+from google.cloud.automl_v1beta1.types import translation
+from google.cloud.automl_v1beta1.types import video
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.automl.v1beta1',
+    manifest={
+        'Model',
+    },
+)
+
+
+class Model(proto.Message):
+    r"""API proto representing a trained machine learning model.
+
+    This message has `oneof`_ fields (mutually exclusive fields).
+    For each oneof, at most one member field can be set at the same time.
+    Setting any member of the oneof automatically clears all other
+    members.
+
+    ..
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + translation_model_metadata (google.cloud.automl_v1beta1.types.TranslationModelMetadata): + Metadata for translation models. + + This field is a member of `oneof`_ ``model_metadata``. + image_classification_model_metadata (google.cloud.automl_v1beta1.types.ImageClassificationModelMetadata): + Metadata for image classification models. + + This field is a member of `oneof`_ ``model_metadata``. + text_classification_model_metadata (google.cloud.automl_v1beta1.types.TextClassificationModelMetadata): + Metadata for text classification models. + + This field is a member of `oneof`_ ``model_metadata``. + image_object_detection_model_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionModelMetadata): + Metadata for image object detection models. + + This field is a member of `oneof`_ ``model_metadata``. + video_classification_model_metadata (google.cloud.automl_v1beta1.types.VideoClassificationModelMetadata): + Metadata for video classification models. + + This field is a member of `oneof`_ ``model_metadata``. + video_object_tracking_model_metadata (google.cloud.automl_v1beta1.types.VideoObjectTrackingModelMetadata): + Metadata for video object tracking models. + + This field is a member of `oneof`_ ``model_metadata``. + text_extraction_model_metadata (google.cloud.automl_v1beta1.types.TextExtractionModelMetadata): + Metadata for text extraction models. + + This field is a member of `oneof`_ ``model_metadata``. + tables_model_metadata (google.cloud.automl_v1beta1.types.TablesModelMetadata): + Metadata for Tables models. + + This field is a member of `oneof`_ ``model_metadata``. + text_sentiment_model_metadata (google.cloud.automl_v1beta1.types.TextSentimentModelMetadata): + Metadata for text sentiment models. + + This field is a member of `oneof`_ ``model_metadata``. + name (str): + Output only. Resource name of the model. Format: + ``projects/{project_id}/locations/{location_id}/models/{model_id}`` + display_name (str): + Required. The name of the model to show in the interface. + The name can be up to 32 characters long and can consist + only of ASCII Latin letters A-Z and a-z, underscores (_), + and ASCII digits 0-9. It must start with a letter. + dataset_id (str): + Required. The resource ID of the dataset used + to create the model. The dataset must come from + the same ancestor project and location. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the model + training finished and can be used for + prediction. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this model was + last updated. + deployment_state (google.cloud.automl_v1beta1.types.Model.DeploymentState): + Output only. Deployment state of the model. A + model can only serve prediction requests after + it gets deployed. + """ + class DeploymentState(proto.Enum): + r"""Deployment state of the model. + + Values: + DEPLOYMENT_STATE_UNSPECIFIED (0): + Should not be used, an un-set enum has this + value by default. + DEPLOYED (1): + Model is deployed. + UNDEPLOYED (2): + Model is not deployed. 
+ """ + DEPLOYMENT_STATE_UNSPECIFIED = 0 + DEPLOYED = 1 + UNDEPLOYED = 2 + + translation_model_metadata: translation.TranslationModelMetadata = proto.Field( + proto.MESSAGE, + number=15, + oneof='model_metadata', + message=translation.TranslationModelMetadata, + ) + image_classification_model_metadata: image.ImageClassificationModelMetadata = proto.Field( + proto.MESSAGE, + number=13, + oneof='model_metadata', + message=image.ImageClassificationModelMetadata, + ) + text_classification_model_metadata: text.TextClassificationModelMetadata = proto.Field( + proto.MESSAGE, + number=14, + oneof='model_metadata', + message=text.TextClassificationModelMetadata, + ) + image_object_detection_model_metadata: image.ImageObjectDetectionModelMetadata = proto.Field( + proto.MESSAGE, + number=20, + oneof='model_metadata', + message=image.ImageObjectDetectionModelMetadata, + ) + video_classification_model_metadata: video.VideoClassificationModelMetadata = proto.Field( + proto.MESSAGE, + number=23, + oneof='model_metadata', + message=video.VideoClassificationModelMetadata, + ) + video_object_tracking_model_metadata: video.VideoObjectTrackingModelMetadata = proto.Field( + proto.MESSAGE, + number=21, + oneof='model_metadata', + message=video.VideoObjectTrackingModelMetadata, + ) + text_extraction_model_metadata: text.TextExtractionModelMetadata = proto.Field( + proto.MESSAGE, + number=19, + oneof='model_metadata', + message=text.TextExtractionModelMetadata, + ) + tables_model_metadata: tables.TablesModelMetadata = proto.Field( + proto.MESSAGE, + number=24, + oneof='model_metadata', + message=tables.TablesModelMetadata, + ) + text_sentiment_model_metadata: text.TextSentimentModelMetadata = proto.Field( + proto.MESSAGE, + number=22, + oneof='model_metadata', + message=text.TextSentimentModelMetadata, + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + dataset_id: str = proto.Field( + proto.STRING, + number=3, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + deployment_state: DeploymentState = proto.Field( + proto.ENUM, + number=8, + enum=DeploymentState, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model_evaluation.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model_evaluation.py new file mode 100644 index 00000000..f195068a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model_evaluation.py @@ -0,0 +1,196 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto  # type: ignore
+
+from google.cloud.automl_v1beta1.types import classification
+from google.cloud.automl_v1beta1.types import detection
+from google.cloud.automl_v1beta1.types import regression
+from google.cloud.automl_v1beta1.types import text_extraction
+from google.cloud.automl_v1beta1.types import text_sentiment
+from google.cloud.automl_v1beta1.types import translation
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.automl.v1beta1',
+    manifest={
+        'ModelEvaluation',
+    },
+)
+
+
+class ModelEvaluation(proto.Message):
+    r"""Evaluation results of a model.
+
+    This message has `oneof`_ fields (mutually exclusive fields).
+    For each oneof, at most one member field can be set at the same time.
+    Setting any member of the oneof automatically clears all other
+    members.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        classification_evaluation_metrics (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics):
+            Model evaluation metrics for image, text,
+            video and tables classification.
+            Tables problem is considered a classification
+            when the target column is CATEGORY DataType.
+
+            This field is a member of `oneof`_ ``metrics``.
+        regression_evaluation_metrics (google.cloud.automl_v1beta1.types.RegressionEvaluationMetrics):
+            Model evaluation metrics for Tables
+            regression. Tables problem is considered a
+            regression when the target column has FLOAT64
+            DataType.
+
+            This field is a member of `oneof`_ ``metrics``.
+        translation_evaluation_metrics (google.cloud.automl_v1beta1.types.TranslationEvaluationMetrics):
+            Model evaluation metrics for translation.
+
+            This field is a member of `oneof`_ ``metrics``.
+        image_object_detection_evaluation_metrics (google.cloud.automl_v1beta1.types.ImageObjectDetectionEvaluationMetrics):
+            Model evaluation metrics for image object
+            detection.
+
+            This field is a member of `oneof`_ ``metrics``.
+        video_object_tracking_evaluation_metrics (google.cloud.automl_v1beta1.types.VideoObjectTrackingEvaluationMetrics):
+            Model evaluation metrics for video object
+            tracking.
+
+            This field is a member of `oneof`_ ``metrics``.
+        text_sentiment_evaluation_metrics (google.cloud.automl_v1beta1.types.TextSentimentEvaluationMetrics):
+            Evaluation metrics for text sentiment models.
+
+            This field is a member of `oneof`_ ``metrics``.
+        text_extraction_evaluation_metrics (google.cloud.automl_v1beta1.types.TextExtractionEvaluationMetrics):
+            Evaluation metrics for text extraction
+            models.
+
+            This field is a member of `oneof`_ ``metrics``.
+        name (str):
+            Output only. Resource name of the model evaluation. Format:
+
+            ``projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}``
+        annotation_spec_id (str):
+            Output only. The ID of the annotation spec that the model
+            evaluation applies to. The ID is empty for the overall
+            model evaluation. For Tables, annotation specs in the dataset
+            do not exist and this ID is always not set, but for
+            CLASSIFICATION
+
+            [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
+            the
+            [display_name][google.cloud.automl.v1beta1.ModelEvaluation.display_name]
+            field is used.
+        display_name (str):
+            Output only. The value of
+            [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name]
+            at the moment when the model was trained. Because this field
+            returns a value at model training time, for different models
+            trained from the same dataset, the values may differ, since
+            display names could have been changed between the two models'
+            trainings. For Tables CLASSIFICATION
+
+            [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
+            distinct values of the target column at the moment of the
+            model evaluation are populated here. The display_name is
+            empty for the overall model evaluation.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this model
+            evaluation was created.
+        evaluated_example_count (int):
+            Output only. The number of examples used for model
+            evaluation, i.e. for which ground truth from time of model
+            creation is compared against the predicted annotations
+            created by the model. For overall ModelEvaluation (i.e. with
+            annotation_spec_id not set) this is the total number of all
+            examples used for evaluation. Otherwise, this is the count
+            of examples that according to the ground truth were
+            annotated by the
+
+            [annotation_spec_id][google.cloud.automl.v1beta1.ModelEvaluation.annotation_spec_id].
+    """
+
+    classification_evaluation_metrics: classification.ClassificationEvaluationMetrics = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        oneof='metrics',
+        message=classification.ClassificationEvaluationMetrics,
+    )
+    regression_evaluation_metrics: regression.RegressionEvaluationMetrics = proto.Field(
+        proto.MESSAGE,
+        number=24,
+        oneof='metrics',
+        message=regression.RegressionEvaluationMetrics,
+    )
+    translation_evaluation_metrics: translation.TranslationEvaluationMetrics = proto.Field(
+        proto.MESSAGE,
+        number=9,
+        oneof='metrics',
+        message=translation.TranslationEvaluationMetrics,
+    )
+    image_object_detection_evaluation_metrics: detection.ImageObjectDetectionEvaluationMetrics = proto.Field(
+        proto.MESSAGE,
+        number=12,
+        oneof='metrics',
+        message=detection.ImageObjectDetectionEvaluationMetrics,
+    )
+    video_object_tracking_evaluation_metrics: detection.VideoObjectTrackingEvaluationMetrics = proto.Field(
+        proto.MESSAGE,
+        number=14,
+        oneof='metrics',
+        message=detection.VideoObjectTrackingEvaluationMetrics,
+    )
+    text_sentiment_evaluation_metrics: text_sentiment.TextSentimentEvaluationMetrics = proto.Field(
+        proto.MESSAGE,
+        number=11,
+        oneof='metrics',
+        message=text_sentiment.TextSentimentEvaluationMetrics,
+    )
+    text_extraction_evaluation_metrics: text_extraction.TextExtractionEvaluationMetrics = proto.Field(
+        proto.MESSAGE,
+        number=13,
+        oneof='metrics',
+        message=text_extraction.TextExtractionEvaluationMetrics,
+    )
+    name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    annotation_spec_id: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    display_name: str = proto.Field(
+        proto.STRING,
+        number=15,
+    )
+    create_time: timestamp_pb2.Timestamp = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=timestamp_pb2.Timestamp,
+    )
+    evaluated_example_count: int = proto.Field(
+        proto.INT32,
+        number=6,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/operations.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/operations.py
new file mode 100644
index 00000000..8916452c
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/operations.py
@@ -0,0 +1,392 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import io +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'OperationMetadata', + 'DeleteOperationMetadata', + 'DeployModelOperationMetadata', + 'UndeployModelOperationMetadata', + 'CreateModelOperationMetadata', + 'ImportDataOperationMetadata', + 'ExportDataOperationMetadata', + 'BatchPredictOperationMetadata', + 'ExportModelOperationMetadata', + 'ExportEvaluatedExamplesOperationMetadata', + }, +) + + +class OperationMetadata(proto.Message): + r"""Metadata used across all long running operations returned by + AutoML API. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + delete_details (google.cloud.automl_v1beta1.types.DeleteOperationMetadata): + Details of a Delete operation. + + This field is a member of `oneof`_ ``details``. + deploy_model_details (google.cloud.automl_v1beta1.types.DeployModelOperationMetadata): + Details of a DeployModel operation. + + This field is a member of `oneof`_ ``details``. + undeploy_model_details (google.cloud.automl_v1beta1.types.UndeployModelOperationMetadata): + Details of an UndeployModel operation. + + This field is a member of `oneof`_ ``details``. + create_model_details (google.cloud.automl_v1beta1.types.CreateModelOperationMetadata): + Details of CreateModel operation. + + This field is a member of `oneof`_ ``details``. + import_data_details (google.cloud.automl_v1beta1.types.ImportDataOperationMetadata): + Details of ImportData operation. + + This field is a member of `oneof`_ ``details``. + batch_predict_details (google.cloud.automl_v1beta1.types.BatchPredictOperationMetadata): + Details of BatchPredict operation. + + This field is a member of `oneof`_ ``details``. + export_data_details (google.cloud.automl_v1beta1.types.ExportDataOperationMetadata): + Details of ExportData operation. + + This field is a member of `oneof`_ ``details``. + export_model_details (google.cloud.automl_v1beta1.types.ExportModelOperationMetadata): + Details of ExportModel operation. + + This field is a member of `oneof`_ ``details``. + export_evaluated_examples_details (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOperationMetadata): + Details of ExportEvaluatedExamples operation. + + This field is a member of `oneof`_ ``details``. + progress_percent (int): + Output only. Progress of operation. Range: [0, 100]. Not + used currently. 
+ partial_failures (MutableSequence[google.rpc.status_pb2.Status]): + Output only. Partial failures encountered. + E.g. single files that couldn't be read. + This field should never exceed 20 entries. + Status details field will contain standard GCP + error details. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the operation was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the operation was + updated for the last time. + """ + + delete_details: 'DeleteOperationMetadata' = proto.Field( + proto.MESSAGE, + number=8, + oneof='details', + message='DeleteOperationMetadata', + ) + deploy_model_details: 'DeployModelOperationMetadata' = proto.Field( + proto.MESSAGE, + number=24, + oneof='details', + message='DeployModelOperationMetadata', + ) + undeploy_model_details: 'UndeployModelOperationMetadata' = proto.Field( + proto.MESSAGE, + number=25, + oneof='details', + message='UndeployModelOperationMetadata', + ) + create_model_details: 'CreateModelOperationMetadata' = proto.Field( + proto.MESSAGE, + number=10, + oneof='details', + message='CreateModelOperationMetadata', + ) + import_data_details: 'ImportDataOperationMetadata' = proto.Field( + proto.MESSAGE, + number=15, + oneof='details', + message='ImportDataOperationMetadata', + ) + batch_predict_details: 'BatchPredictOperationMetadata' = proto.Field( + proto.MESSAGE, + number=16, + oneof='details', + message='BatchPredictOperationMetadata', + ) + export_data_details: 'ExportDataOperationMetadata' = proto.Field( + proto.MESSAGE, + number=21, + oneof='details', + message='ExportDataOperationMetadata', + ) + export_model_details: 'ExportModelOperationMetadata' = proto.Field( + proto.MESSAGE, + number=22, + oneof='details', + message='ExportModelOperationMetadata', + ) + export_evaluated_examples_details: 'ExportEvaluatedExamplesOperationMetadata' = proto.Field( + proto.MESSAGE, + number=26, + oneof='details', + message='ExportEvaluatedExamplesOperationMetadata', + ) + progress_percent: int = proto.Field( + proto.INT32, + number=13, + ) + partial_failures: MutableSequence[status_pb2.Status] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=status_pb2.Status, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteOperationMetadata(proto.Message): + r"""Details of operations that perform deletes of any entities. + """ + + +class DeployModelOperationMetadata(proto.Message): + r"""Details of DeployModel operation. + """ + + +class UndeployModelOperationMetadata(proto.Message): + r"""Details of UndeployModel operation. + """ + + +class CreateModelOperationMetadata(proto.Message): + r"""Details of CreateModel operation. + """ + + +class ImportDataOperationMetadata(proto.Message): + r"""Details of ImportData operation. + """ + + +class ExportDataOperationMetadata(proto.Message): + r"""Details of ExportData operation. + + Attributes: + output_info (google.cloud.automl_v1beta1.types.ExportDataOperationMetadata.ExportDataOutputInfo): + Output only. Information further describing + this export data's output. + """ + + class ExportDataOutputInfo(proto.Message): + r"""Further describes this export data's output. Supplements + [OutputConfig][google.cloud.automl.v1beta1.OutputConfig]. + + This message has `oneof`_ fields (mutually exclusive fields). 
+ For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the exported data + is written. + + This field is a member of `oneof`_ ``output_location``. + bigquery_output_dataset (str): + The path of the BigQuery dataset created, in + bq://projectId.bqDatasetId format, into which + the exported data is written. + + This field is a member of `oneof`_ ``output_location``. + """ + + gcs_output_directory: str = proto.Field( + proto.STRING, + number=1, + oneof='output_location', + ) + bigquery_output_dataset: str = proto.Field( + proto.STRING, + number=2, + oneof='output_location', + ) + + output_info: ExportDataOutputInfo = proto.Field( + proto.MESSAGE, + number=1, + message=ExportDataOutputInfo, + ) + + +class BatchPredictOperationMetadata(proto.Message): + r"""Details of BatchPredict operation. + + Attributes: + input_config (google.cloud.automl_v1beta1.types.BatchPredictInputConfig): + Output only. The input config that was given + upon starting this batch predict operation. + output_info (google.cloud.automl_v1beta1.types.BatchPredictOperationMetadata.BatchPredictOutputInfo): + Output only. Information further describing + this batch predict's output. + """ + + class BatchPredictOutputInfo(proto.Message): + r"""Further describes this batch predict's output. Supplements + + [BatchPredictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the prediction + output is written. + + This field is a member of `oneof`_ ``output_location``. + bigquery_output_dataset (str): + The path of the BigQuery dataset created, in + bq://projectId.bqDatasetId format, into which + the prediction output is written. + + This field is a member of `oneof`_ ``output_location``. + """ + + gcs_output_directory: str = proto.Field( + proto.STRING, + number=1, + oneof='output_location', + ) + bigquery_output_dataset: str = proto.Field( + proto.STRING, + number=2, + oneof='output_location', + ) + + input_config: io.BatchPredictInputConfig = proto.Field( + proto.MESSAGE, + number=1, + message=io.BatchPredictInputConfig, + ) + output_info: BatchPredictOutputInfo = proto.Field( + proto.MESSAGE, + number=2, + message=BatchPredictOutputInfo, + ) + + +class ExportModelOperationMetadata(proto.Message): + r"""Details of ExportModel operation. + + Attributes: + output_info (google.cloud.automl_v1beta1.types.ExportModelOperationMetadata.ExportModelOutputInfo): + Output only. Information further describing + the output of this model export. + """ + + class ExportModelOutputInfo(proto.Message): + r"""Further describes the output of model export. Supplements + + [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. 
+ + Attributes: + gcs_output_directory (str): + The full path of the Google Cloud Storage + directory created, into which the model will be + exported. + """ + + gcs_output_directory: str = proto.Field( + proto.STRING, + number=1, + ) + + output_info: ExportModelOutputInfo = proto.Field( + proto.MESSAGE, + number=2, + message=ExportModelOutputInfo, + ) + + +class ExportEvaluatedExamplesOperationMetadata(proto.Message): + r"""Details of EvaluatedExamples operation. + + Attributes: + output_info (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo): + Output only. Information further describing + the output of this evaluated examples export. + """ + + class ExportEvaluatedExamplesOutputInfo(proto.Message): + r"""Further describes the output of the evaluated examples export. + Supplements + + [ExportEvaluatedExamplesOutputConfig][google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig]. + + Attributes: + bigquery_output_dataset (str): + The path of the BigQuery dataset created, in + bq://projectId.bqDatasetId format, into which + the output of export evaluated examples is + written. + """ + + bigquery_output_dataset: str = proto.Field( + proto.STRING, + number=2, + ) + + output_info: ExportEvaluatedExamplesOutputInfo = proto.Field( + proto.MESSAGE, + number=2, + message=ExportEvaluatedExamplesOutputInfo, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/prediction_service.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/prediction_service.py new file mode 100644 index 00000000..d4a9abaa --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/prediction_service.py @@ -0,0 +1,285 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import annotation_payload +from google.cloud.automl_v1beta1.types import data_items +from google.cloud.automl_v1beta1.types import io + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'PredictRequest', + 'PredictResponse', + 'BatchPredictRequest', + 'BatchPredictResult', + }, +) + + +class PredictRequest(proto.Message): + r"""Request message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. + + Attributes: + name (str): + Required. Name of the model requested to + serve the prediction. + payload (google.cloud.automl_v1beta1.types.ExamplePayload): + Required. Payload to perform a prediction on. + The payload must match the problem type that the + model was trained to solve. + params (MutableMapping[str, str]): + Additional domain-specific parameters, any string must be up + to 25000 characters long. + + - For Image Classification: + + ``score_threshold`` - (float) A value from 0.0 to 1.0. 
+ When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. + + - For Image Object Detection: ``score_threshold`` - (float) + When Model detects objects on the image, it will only + produce bounding boxes which have at least this + confidence score. Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than this + number of bounding boxes will be returned in the + response. Default is 100, the requested value may be + limited by server. + + - For Tables: feature_importance - (boolean) Whether + feature importance should be populated in the returned + TablesAnnotation. The default is false. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + payload: data_items.ExamplePayload = proto.Field( + proto.MESSAGE, + number=2, + message=data_items.ExamplePayload, + ) + params: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=3, + ) + + +class PredictResponse(proto.Message): + r"""Response message for + [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. + + Attributes: + payload (MutableSequence[google.cloud.automl_v1beta1.types.AnnotationPayload]): + Prediction result. + Translation and Text Sentiment will return + precisely one payload. + preprocessed_input (google.cloud.automl_v1beta1.types.ExamplePayload): + The preprocessed example that AutoML actually makes + prediction on. Empty if AutoML does not preprocess the input + example. + + - For Text Extraction: If the input is a .pdf file, the + OCR'ed text will be provided in + [document_text][google.cloud.automl.v1beta1.Document.document_text]. + metadata (MutableMapping[str, str]): + Additional domain-specific prediction response metadata. + + - For Image Object Detection: ``max_bounding_box_count`` - + (int64) At most that many bounding boxes per image could + have been returned. + + - For Text Sentiment: ``sentiment_score`` - (float, + deprecated) A value between -1 and 1, -1 maps to least + positive sentiment, while 1 maps to the most positive one + and the higher the score, the more positive the sentiment + in the document is. Yet these values are relative to the + training data, so e.g. if all data was positive then -1 + will be also positive (though the least). The + sentiment_score shouldn't be confused with "score" or + "magnitude" from the previous Natural Language Sentiment + Analysis API. + """ + + payload: MutableSequence[annotation_payload.AnnotationPayload] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=annotation_payload.AnnotationPayload, + ) + preprocessed_input: data_items.ExamplePayload = proto.Field( + proto.MESSAGE, + number=3, + message=data_items.ExamplePayload, + ) + metadata: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + + +class BatchPredictRequest(proto.Message): + r"""Request message for + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + + Attributes: + name (str): + Required. Name of the model requested to + serve the batch prediction. + input_config (google.cloud.automl_v1beta1.types.BatchPredictInputConfig): + Required. The input configuration for batch + prediction. + output_config (google.cloud.automl_v1beta1.types.BatchPredictOutputConfig): + Required. The Configuration specifying where + output predictions should be written. + params (MutableMapping[str, str]): + Required. 
Additional domain-specific parameters for the + predictions, any string must be up to 25000 characters long. + + - For Text Classification: + + ``score_threshold`` - (float) A value from 0.0 to 1.0. + When the model makes predictions for a text snippet, it + will only produce results that have at least this + confidence score. The default is 0.5. + + - For Image Classification: + + ``score_threshold`` - (float) A value from 0.0 to 1.0. + When the model makes predictions for an image, it will + only produce results that have at least this confidence + score. The default is 0.5. + + - For Image Object Detection: + + ``score_threshold`` - (float) When Model detects objects + on the image, it will only produce bounding boxes which + have at least this confidence score. Value in 0 to 1 + range, default is 0.5. ``max_bounding_box_count`` - + (int64) No more than this number of bounding boxes will + be produced per image. Default is 100, the requested + value may be limited by server. + + - For Video Classification : + + ``score_threshold`` - (float) A value from 0.0 to 1.0. + When the model makes predictions for a video, it will + only produce results that have at least this confidence + score. The default is 0.5. ``segment_classification`` - + (boolean) Set to true to request segment-level + classification. AutoML Video Intelligence returns labels + and their confidence scores for the entire segment of the + video that user specified in the request configuration. + The default is "true". ``shot_classification`` - + (boolean) Set to true to request shot-level + classification. AutoML Video Intelligence determines the + boundaries for each camera shot in the entire segment of + the video that user specified in the request + configuration. AutoML Video Intelligence then returns + labels and their confidence scores for each detected + shot, along with the start and end time of the shot. + WARNING: Model evaluation is not done for this + classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. The default is "false". + ``1s_interval_classification`` - (boolean) Set to true to + request classification for a video at one-second + intervals. AutoML Video Intelligence returns labels and + their confidence scores for each second of the entire + segment of the video that user specified in the request + configuration. WARNING: Model evaluation is not done for + this classification type, the quality of it depends on + training data, but there are no metrics provided to + describe that quality. The default is "false". + + - For Tables: + + feature_importance - (boolean) Whether feature importance + should be populated in the returned TablesAnnotations. + The default is false. + + - For Video Object Tracking: + + ``score_threshold`` - (float) When Model detects objects + on video frames, it will only produce bounding boxes + which have at least this confidence score. Value in 0 to + 1 range, default is 0.5. ``max_bounding_box_count`` - + (int64) No more than this number of bounding boxes will + be returned per frame. Default is 100, the requested + value may be limited by server. ``min_bounding_box_size`` + - (float) Only bounding boxes with shortest edge at least + that long as a relative value of video frame size will be + returned. Value in 0 to 1 range. Default is 0. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + input_config: io.BatchPredictInputConfig = proto.Field( + proto.MESSAGE, + number=3, + message=io.BatchPredictInputConfig, + ) + output_config: io.BatchPredictOutputConfig = proto.Field( + proto.MESSAGE, + number=4, + message=io.BatchPredictOutputConfig, + ) + params: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + + +class BatchPredictResult(proto.Message): + r"""Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of the operation + returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + + Attributes: + metadata (MutableMapping[str, str]): + Additional domain-specific prediction response metadata. + + - For Image Object Detection: ``max_bounding_box_count`` - + (int64) At most that many bounding boxes per image could + have been returned. + + - For Video Object Tracking: ``max_bounding_box_count`` - + (int64) At most that many bounding boxes per frame could + have been returned. + """ + + metadata: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/ranges.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/ranges.py new file mode 100644 index 00000000..262e14b0 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/ranges.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'DoubleRange', + }, +) + + +class DoubleRange(proto.Message): + r"""A range between two double numbers. + + Attributes: + start (float): + Start of the range, inclusive. + end (float): + End of the range, exclusive. + """ + + start: float = proto.Field( + proto.DOUBLE, + number=1, + ) + end: float = proto.Field( + proto.DOUBLE, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/regression.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/regression.py new file mode 100644 index 00000000..123eda6c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/regression.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.automl.v1beta1',
+    manifest={
+        'RegressionEvaluationMetrics',
+    },
+)
+
+
+class RegressionEvaluationMetrics(proto.Message):
+    r"""Metrics for regression problems.
+
+    Attributes:
+        root_mean_squared_error (float):
+            Output only. Root Mean Squared Error (RMSE).
+        mean_absolute_error (float):
+            Output only. Mean Absolute Error (MAE).
+        mean_absolute_percentage_error (float):
+            Output only. Mean absolute percentage error.
+            Only set if all ground truth values are
+            positive.
+        r_squared (float):
+            Output only. R squared.
+        root_mean_squared_log_error (float):
+            Output only. Root mean squared log error.
+    """
+
+    root_mean_squared_error: float = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    mean_absolute_error: float = proto.Field(
+        proto.FLOAT,
+        number=2,
+    )
+    mean_absolute_percentage_error: float = proto.Field(
+        proto.FLOAT,
+        number=3,
+    )
+    r_squared: float = proto.Field(
+        proto.FLOAT,
+        number=4,
+    )
+    root_mean_squared_log_error: float = proto.Field(
+        proto.FLOAT,
+        number=5,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/service.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/service.py
new file mode 100644
index 00000000..28dd4971
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/service.py
@@ -0,0 +1,874 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import image +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import model as gca_model +from google.cloud.automl_v1beta1.types import model_evaluation as gca_model_evaluation +from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'CreateDatasetRequest', + 'GetDatasetRequest', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'UpdateDatasetRequest', + 'DeleteDatasetRequest', + 'ImportDataRequest', + 'ExportDataRequest', + 'GetAnnotationSpecRequest', + 'GetTableSpecRequest', + 'ListTableSpecsRequest', + 'ListTableSpecsResponse', + 'UpdateTableSpecRequest', + 'GetColumnSpecRequest', + 'ListColumnSpecsRequest', + 'ListColumnSpecsResponse', + 'UpdateColumnSpecRequest', + 'CreateModelRequest', + 'GetModelRequest', + 'ListModelsRequest', + 'ListModelsResponse', + 'DeleteModelRequest', + 'DeployModelRequest', + 'UndeployModelRequest', + 'ExportModelRequest', + 'ExportEvaluatedExamplesRequest', + 'GetModelEvaluationRequest', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + }, +) + + +class CreateDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. + + Attributes: + parent (str): + Required. The resource name of the project to + create the dataset for. + dataset (google.cloud.automl_v1beta1.types.Dataset): + Required. The dataset to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + dataset: gca_dataset.Dataset = proto.Field( + proto.MESSAGE, + number=2, + message=gca_dataset.Dataset, + ) + + +class GetDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. + + Attributes: + name (str): + Required. The resource name of the dataset to + retrieve. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListDatasetsRequest(proto.Message): + r"""Request message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + + Attributes: + parent (str): + Required. The resource name of the project + from which to list datasets. + filter (str): + An expression for filtering the results of the request. + + - ``dataset_metadata`` - for existence of the case (e.g. + ``image_classification_dataset_metadata:*``). Some + examples of using the filter are: + + - ``translation_dataset_metadata:*`` --> The dataset has + ``translation_dataset_metadata``. + page_size (int): + Requested page size. Server may return fewer + results than requested. If unspecified, server + will pick a default size. + page_token (str): + A token identifying a page of results for the server to + return Typically obtained via + [ListDatasetsResponse.next_page_token][google.cloud.automl.v1beta1.ListDatasetsResponse.next_page_token] + of the previous + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets] + call. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=3, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDatasetsResponse(proto.Message): + r"""Response message for + [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. + + Attributes: + datasets (MutableSequence[google.cloud.automl_v1beta1.types.Dataset]): + The datasets read. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListDatasetsRequest.page_token][google.cloud.automl.v1beta1.ListDatasetsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + datasets: MutableSequence[gca_dataset.Dataset] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] + + Attributes: + dataset (google.cloud.automl_v1beta1.types.Dataset): + Required. The dataset which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + The update mask applies to the resource. + """ + + dataset: gca_dataset.Dataset = proto.Field( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteDatasetRequest(proto.Message): + r"""Request message for + [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. + + Attributes: + name (str): + Required. The resource name of the dataset to + delete. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ImportDataRequest(proto.Message): + r"""Request message for + [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. + + Attributes: + name (str): + Required. Dataset name. Dataset must already + exist. All imported annotations and examples + will be added. + input_config (google.cloud.automl_v1beta1.types.InputConfig): + Required. The desired input location and its + domain specific semantics, if any. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + input_config: io.InputConfig = proto.Field( + proto.MESSAGE, + number=3, + message=io.InputConfig, + ) + + +class ExportDataRequest(proto.Message): + r"""Request message for + [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. + + Attributes: + name (str): + Required. The resource name of the dataset. + output_config (google.cloud.automl_v1beta1.types.OutputConfig): + Required. The desired output location. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + output_config: io.OutputConfig = proto.Field( + proto.MESSAGE, + number=3, + message=io.OutputConfig, + ) + + +class GetAnnotationSpecRequest(proto.Message): + r"""Request message for + [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. + + Attributes: + name (str): + Required. The resource name of the annotation + spec to retrieve. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class GetTableSpecRequest(proto.Message): + r"""Request message for + [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. + + Attributes: + name (str): + Required. 
The resource name of the table spec + to retrieve. + field_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + field_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListTableSpecsRequest(proto.Message): + r"""Request message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + + Attributes: + parent (str): + Required. The resource name of the dataset to + list table specs from. + field_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + filter (str): + Filter expression, see go/filtering. + page_size (int): + Requested page size. The server can return + fewer results than requested. If unspecified, + the server will pick a default size. + page_token (str): + A token identifying a page of results for the server to + return. Typically obtained from the + [ListTableSpecsResponse.next_page_token][google.cloud.automl.v1beta1.ListTableSpecsResponse.next_page_token] + field of the previous + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs] + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + field_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + filter: str = proto.Field( + proto.STRING, + number=3, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListTableSpecsResponse(proto.Message): + r"""Response message for + [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. + + Attributes: + table_specs (MutableSequence[google.cloud.automl_v1beta1.types.TableSpec]): + The table specs read. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListTableSpecsRequest.page_token][google.cloud.automl.v1beta1.ListTableSpecsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + table_specs: MutableSequence[gca_table_spec.TableSpec] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_table_spec.TableSpec, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTableSpecRequest(proto.Message): + r"""Request message for + [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] + + Attributes: + table_spec (google.cloud.automl_v1beta1.types.TableSpec): + Required. The table spec which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + The update mask applies to the resource. + """ + + table_spec: gca_table_spec.TableSpec = proto.Field( + proto.MESSAGE, + number=1, + message=gca_table_spec.TableSpec, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class GetColumnSpecRequest(proto.Message): + r"""Request message for + [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. + + Attributes: + name (str): + Required. The resource name of the column + spec to retrieve. + field_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + field_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListColumnSpecsRequest(proto.Message): + r"""Request message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + + Attributes: + parent (str): + Required. The resource name of the table spec + to list column specs from. + field_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + filter (str): + Filter expression, see go/filtering. + page_size (int): + Requested page size. The server can return + fewer results than requested. If unspecified, + the server will pick a default size. + page_token (str): + A token identifying a page of results for the server to + return. Typically obtained from the + [ListColumnSpecsResponse.next_page_token][google.cloud.automl.v1beta1.ListColumnSpecsResponse.next_page_token] + field of the previous + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs] + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + field_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + filter: str = proto.Field( + proto.STRING, + number=3, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListColumnSpecsResponse(proto.Message): + r"""Response message for + [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. + + Attributes: + column_specs (MutableSequence[google.cloud.automl_v1beta1.types.ColumnSpec]): + The column specs read. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListColumnSpecsRequest.page_token][google.cloud.automl.v1beta1.ListColumnSpecsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + column_specs: MutableSequence[gca_column_spec.ColumnSpec] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_column_spec.ColumnSpec, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateColumnSpecRequest(proto.Message): + r"""Request message for + [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] + + Attributes: + column_spec (google.cloud.automl_v1beta1.types.ColumnSpec): + Required. The column spec which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + The update mask applies to the resource. + """ + + column_spec: gca_column_spec.ColumnSpec = proto.Field( + proto.MESSAGE, + number=1, + message=gca_column_spec.ColumnSpec, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class CreateModelRequest(proto.Message): + r"""Request message for + [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. + + Attributes: + parent (str): + Required. Resource name of the parent project + where the model is being created. + model (google.cloud.automl_v1beta1.types.Model): + Required. The model to create. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + model: gca_model.Model = proto.Field( + proto.MESSAGE, + number=4, + message=gca_model.Model, + ) + + +class GetModelRequest(proto.Message): + r"""Request message for + [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. + + Attributes: + name (str): + Required. Resource name of the model. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelsRequest(proto.Message): + r"""Request message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + + Attributes: + parent (str): + Required. Resource name of the project, from + which to list the models. + filter (str): + An expression for filtering the results of the request. + + - ``model_metadata`` - for existence of the case (e.g. + ``video_classification_model_metadata:*``). + + - ``dataset_id`` - for = or !=. Some examples of using the + filter are: + + - ``image_classification_model_metadata:*`` --> The model + has ``image_classification_model_metadata``. + + - ``dataset_id=5`` --> The model was created from a dataset + with ID 5. + page_size (int): + Requested page size. + page_token (str): + A token identifying a page of results for the server to + return Typically obtained via + [ListModelsResponse.next_page_token][google.cloud.automl.v1beta1.ListModelsResponse.next_page_token] + of the previous + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels] + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=3, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListModelsResponse(proto.Message): + r"""Response message for + [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. + + Attributes: + model (MutableSequence[google.cloud.automl_v1beta1.types.Model]): + List of models in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelsRequest.page_token][google.cloud.automl.v1beta1.ListModelsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + model: MutableSequence[gca_model.Model] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteModelRequest(proto.Message): + r"""Request message for + [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. + + Attributes: + name (str): + Required. Resource name of the model being + deleted. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class DeployModelRequest(proto.Message): + r"""Request message for + [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + image_object_detection_model_deployment_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata): + Model deployment metadata specific to Image + Object Detection. + + This field is a member of `oneof`_ ``model_deployment_metadata``. 
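
The filter grammar described for ListModelsRequest above can be exercised with a short sketch like the one below. It assumes the generated v1beta1 AutoMlClient and a placeholder parent resource name; the filter string is one of the examples given in the docstring.

from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()

# Placeholder parent; the filter matches models that carry
# image_classification_model_metadata, per the docstring examples above.
request = automl_v1beta1.ListModelsRequest(
    parent="projects/123/locations/us-central1",
    filter="image_classification_model_metadata:*",
)

for model in client.list_models(request=request):
    print(model.name, model.display_name)
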
+ image_classification_model_deployment_metadata (google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata): + Model deployment metadata specific to Image + Classification. + + This field is a member of `oneof`_ ``model_deployment_metadata``. + name (str): + Required. Resource name of the model to + deploy. + """ + + image_object_detection_model_deployment_metadata: image.ImageObjectDetectionModelDeploymentMetadata = proto.Field( + proto.MESSAGE, + number=2, + oneof='model_deployment_metadata', + message=image.ImageObjectDetectionModelDeploymentMetadata, + ) + image_classification_model_deployment_metadata: image.ImageClassificationModelDeploymentMetadata = proto.Field( + proto.MESSAGE, + number=4, + oneof='model_deployment_metadata', + message=image.ImageClassificationModelDeploymentMetadata, + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UndeployModelRequest(proto.Message): + r"""Request message for + [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. + + Attributes: + name (str): + Required. Resource name of the model to + undeploy. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ExportModelRequest(proto.Message): + r"""Request message for + [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an error code + will be returned. + + Attributes: + name (str): + Required. The resource name of the model to + export. + output_config (google.cloud.automl_v1beta1.types.ModelExportOutputConfig): + Required. The desired output location and + configuration. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + output_config: io.ModelExportOutputConfig = proto.Field( + proto.MESSAGE, + number=3, + message=io.ModelExportOutputConfig, + ) + + +class ExportEvaluatedExamplesRequest(proto.Message): + r"""Request message for + [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. + + Attributes: + name (str): + Required. The resource name of the model + whose evaluated examples are to be exported. + output_config (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig): + Required. The desired output location and + configuration. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + output_config: io.ExportEvaluatedExamplesOutputConfig = proto.Field( + proto.MESSAGE, + number=3, + message=io.ExportEvaluatedExamplesOutputConfig, + ) + + +class GetModelEvaluationRequest(proto.Message): + r"""Request message for + [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. + + Attributes: + name (str): + Required. Resource name for the model + evaluation. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelEvaluationsRequest(proto.Message): + r"""Request message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + + Attributes: + parent (str): + Required. Resource name of the model to list + the model evaluations for. If modelId is set as + "-", this will list model evaluations from + across all models of the parent location. + filter (str): + An expression for filtering the results of the request. + + - ``annotation_spec_id`` - for =, != or existence. See + example below for the last. + + Some examples of using the filter are: + + - ``annotation_spec_id!=4`` --> The model evaluation was + done for annotation spec with ID different than 4. 
+ - ``NOT annotation_spec_id:*`` --> The model evaluation was + done for aggregate of all annotation specs. + page_size (int): + Requested page size. + page_token (str): + A token identifying a page of results for the server to + return. Typically obtained via + [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1beta1.ListModelEvaluationsResponse.next_page_token] + of the previous + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations] + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=3, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListModelEvaluationsResponse(proto.Message): + r"""Response message for + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. + + Attributes: + model_evaluation (MutableSequence[google.cloud.automl_v1beta1.types.ModelEvaluation]): + List of model evaluations in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to the + [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1beta1.ListModelEvaluationsRequest.page_token] + field of a new + [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations] + request to obtain that page. + """ + + @property + def raw_page(self): + return self + + model_evaluation: MutableSequence[gca_model_evaluation.ModelEvaluation] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_evaluation.ModelEvaluation, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/table_spec.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/table_spec.py new file mode 100644 index 00000000..52789421 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/table_spec.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import io + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'TableSpec', + }, +) + + +class TableSpec(proto.Message): + r"""A specification of a relational table. The table's schema is + represented via its child column specs. It is pre-populated as part + of ImportData by schema inference algorithm, the version of which is + a required parameter of ImportData InputConfig. Note: While working + with a table, at times the schema may be inconsistent with the data + in the table (e.g. string in a FLOAT64 column). The consistency + validation is done upon creation of a model. Used by: + + - Tables + + Attributes: + name (str): + Output only. 
The resource name of the table spec. Form: + + ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}`` + time_column_spec_id (str): + column_spec_id of the time column. Only used if the parent + dataset's ml_use_column_spec_id is not set. Used to split + rows into TRAIN, VALIDATE and TEST sets such that oldest + rows go to TRAIN set, newest to TEST, and those in between + to VALIDATE. Required type: TIMESTAMP. If both this column + and ml_use_column are not set, then ML use of all rows will + be assigned by AutoML. NOTE: Updates of this field will + instantly affect any other users concurrently working with + the dataset. + row_count (int): + Output only. The number of rows (i.e. + examples) in the table. + valid_row_count (int): + Output only. The number of valid rows (i.e. + without values that don't match DataType-s of + their columns). + column_count (int): + Output only. The number of columns of the + table. That is, the number of child + ColumnSpec-s. + input_configs (MutableSequence[google.cloud.automl_v1beta1.types.InputConfig]): + Output only. Input configs via which data + currently residing in the table had been + imported. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + time_column_spec_id: str = proto.Field( + proto.STRING, + number=2, + ) + row_count: int = proto.Field( + proto.INT64, + number=3, + ) + valid_row_count: int = proto.Field( + proto.INT64, + number=4, + ) + column_count: int = proto.Field( + proto.INT64, + number=7, + ) + input_configs: MutableSequence[io.InputConfig] = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=io.InputConfig, + ) + etag: str = proto.Field( + proto.STRING, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/tables.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/tables.py new file mode 100644 index 00000000..46649fb7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/tables.py @@ -0,0 +1,426 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import data_stats +from google.cloud.automl_v1beta1.types import ranges +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'TablesDatasetMetadata', + 'TablesModelMetadata', + 'TablesAnnotation', + 'TablesModelColumnInfo', + }, +) + + +class TablesDatasetMetadata(proto.Message): + r"""Metadata for a dataset used for AutoML Tables. 
+ + Attributes: + primary_table_spec_id (str): + Output only. The table_spec_id of the primary table of this + dataset. + target_column_spec_id (str): + column_spec_id of the primary table's column that should be + used as the training & prediction target. This column must + be non-nullable and have one of following data types + (otherwise model creation will error): + + - CATEGORY + + - FLOAT64 + + If the type is CATEGORY , only up to 100 unique values may + exist in that column across all rows. + + NOTE: Updates of this field will instantly affect any other + users concurrently working with the dataset. + weight_column_spec_id (str): + column_spec_id of the primary table's column that should be + used as the weight column, i.e. the higher the value the + more important the row will be during model training. + Required type: FLOAT64. Allowed values: 0 to 10000, + inclusive on both ends; 0 means the row is ignored for + training. If not set all rows are assumed to have equal + weight of 1. NOTE: Updates of this field will instantly + affect any other users concurrently working with the + dataset. + ml_use_column_spec_id (str): + column_spec_id of the primary table column which specifies a + possible ML use of the row, i.e. the column will be used to + split the rows into TRAIN, VALIDATE and TEST sets. Required + type: STRING. This column, if set, must either have all of + ``TRAIN``, ``VALIDATE``, ``TEST`` among its values, or only + have ``TEST``, ``UNASSIGNED`` values. In the latter case the + rows with ``UNASSIGNED`` value will be assigned by AutoML. + Note that if a given ml use distribution makes it impossible + to create a "good" model, that call will error describing + the issue. If both this column_spec_id and primary table's + time_column_spec_id are not set, then all rows are treated + as ``UNASSIGNED``. NOTE: Updates of this field will + instantly affect any other users concurrently working with + the dataset. + target_column_correlations (MutableMapping[str, google.cloud.automl_v1beta1.types.CorrelationStats]): + Output only. Correlations between + + [TablesDatasetMetadata.target_column_spec_id][google.cloud.automl.v1beta1.TablesDatasetMetadata.target_column_spec_id], + and other columns of the + + [TablesDatasetMetadataprimary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id]. + Only set if the target column is set. Mapping from other + column spec id to its CorrelationStats with the target + column. This field may be stale, see the stats_update_time + field for for the timestamp at which these stats were last + updated. + stats_update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The most recent timestamp when + target_column_correlations field and all descendant + ColumnSpec.data_stats and ColumnSpec.top_correlated_columns + fields were last (re-)generated. Any changes that happened + to the dataset afterwards are not reflected in these fields + values. The regeneration happens in the background on a best + effort basis. 
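
As a hedged illustration of the column-role fields documented above (target, weight, and ML-use column spec IDs), the sketch below sets them through an UpdateDatasetRequest. The dataset resource name and column spec IDs are placeholders, and the snippet relies on the proto-plus attribute assignment style shown in the generated samples elsewhere in this patch; it is not an excerpt from the generated code.

from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()

# Placeholder dataset name and column spec ids.
dataset = automl_v1beta1.Dataset(
    name="projects/123/locations/us-central1/datasets/TBL456",
)
dataset.tables_dataset_metadata.target_column_spec_id = "42"
dataset.tables_dataset_metadata.weight_column_spec_id = "43"

request = automl_v1beta1.UpdateDatasetRequest(dataset=dataset)
updated = client.update_dataset(request=request)
print(updated.tables_dataset_metadata.target_column_spec_id)
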
+ """ + + primary_table_spec_id: str = proto.Field( + proto.STRING, + number=1, + ) + target_column_spec_id: str = proto.Field( + proto.STRING, + number=2, + ) + weight_column_spec_id: str = proto.Field( + proto.STRING, + number=3, + ) + ml_use_column_spec_id: str = proto.Field( + proto.STRING, + number=4, + ) + target_column_correlations: MutableMapping[str, data_stats.CorrelationStats] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=6, + message=data_stats.CorrelationStats, + ) + stats_update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + + +class TablesModelMetadata(proto.Message): + r"""Model metadata specific to AutoML Tables. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + optimization_objective_recall_value (float): + Required when optimization_objective is + "MAXIMIZE_PRECISION_AT_RECALL". Must be between 0 and 1, + inclusive. + + This field is a member of `oneof`_ ``additional_optimization_objective_config``. + optimization_objective_precision_value (float): + Required when optimization_objective is + "MAXIMIZE_RECALL_AT_PRECISION". Must be between 0 and 1, + inclusive. + + This field is a member of `oneof`_ ``additional_optimization_objective_config``. + target_column_spec (google.cloud.automl_v1beta1.types.ColumnSpec): + Column spec of the dataset's primary table's column the + model is predicting. Snapshotted when model creation + started. Only 3 fields are used: name - May be set on + CreateModel, if it's not then the ColumnSpec corresponding + to the current target_column_spec_id of the dataset the + model is trained from is used. If neither is set, + CreateModel will error. display_name - Output only. + data_type - Output only. + input_feature_column_specs (MutableSequence[google.cloud.automl_v1beta1.types.ColumnSpec]): + Column specs of the dataset's primary table's columns, on + which the model is trained and which are used as the input + for predictions. The + + [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + as well as, according to dataset's state upon model + creation, + + [weight_column][google.cloud.automl.v1beta1.TablesDatasetMetadata.weight_column_spec_id], + and + + [ml_use_column][google.cloud.automl.v1beta1.TablesDatasetMetadata.ml_use_column_spec_id] + must never be included here. + + Only 3 fields are used: + + - name - May be set on CreateModel, if set only the columns + specified are used, otherwise all primary table's columns + (except the ones listed above) are used for the training + and prediction input. + + - display_name - Output only. + + - data_type - Output only. + optimization_objective (str): + Objective function the model is optimizing towards. The + training process creates a model that maximizes/minimizes + the value of the objective function over the validation set. + + The supported optimization objectives depend on the + prediction type. If the field is not set, a default + objective function is used. + + CLASSIFICATION_BINARY: "MAXIMIZE_AU_ROC" (default) - + Maximize the area under the receiver operating + characteristic (ROC) curve. "MINIMIZE_LOG_LOSS" - Minimize + log loss. 
"MAXIMIZE_AU_PRC" - Maximize the area under the + precision-recall curve. "MAXIMIZE_PRECISION_AT_RECALL" - + Maximize precision for a specified recall value. + "MAXIMIZE_RECALL_AT_PRECISION" - Maximize recall for a + specified precision value. + + CLASSIFICATION_MULTI_CLASS : "MINIMIZE_LOG_LOSS" (default) - + Minimize log loss. + + REGRESSION: "MINIMIZE_RMSE" (default) - Minimize + root-mean-squared error (RMSE). "MINIMIZE_MAE" - Minimize + mean-absolute error (MAE). "MINIMIZE_RMSLE" - Minimize + root-mean-squared log error (RMSLE). + tables_model_column_info (MutableSequence[google.cloud.automl_v1beta1.types.TablesModelColumnInfo]): + Output only. Auxiliary information for each of the + input_feature_column_specs with respect to this particular + model. + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. This especially may happen when + further model training ceases to provide any + improvements. + + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + train_cost_milli_node_hours (int): + Output only. The actual training cost of the + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed the train budget. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. By default, the + early stopping feature is enabled, which means + that AutoML Tables might stop training before + the entire training budget has been used. + """ + + optimization_objective_recall_value: float = proto.Field( + proto.FLOAT, + number=17, + oneof='additional_optimization_objective_config', + ) + optimization_objective_precision_value: float = proto.Field( + proto.FLOAT, + number=18, + oneof='additional_optimization_objective_config', + ) + target_column_spec: column_spec.ColumnSpec = proto.Field( + proto.MESSAGE, + number=2, + message=column_spec.ColumnSpec, + ) + input_feature_column_specs: MutableSequence[column_spec.ColumnSpec] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=column_spec.ColumnSpec, + ) + optimization_objective: str = proto.Field( + proto.STRING, + number=4, + ) + tables_model_column_info: MutableSequence['TablesModelColumnInfo'] = proto.RepeatedField( + proto.MESSAGE, + number=5, + message='TablesModelColumnInfo', + ) + train_budget_milli_node_hours: int = proto.Field( + proto.INT64, + number=6, + ) + train_cost_milli_node_hours: int = proto.Field( + proto.INT64, + number=7, + ) + disable_early_stopping: bool = proto.Field( + proto.BOOL, + number=12, + ) + + +class TablesAnnotation(proto.Message): + r"""Contains annotation details specific to Tables. + + Attributes: + score (float): + Output only. A confidence estimate between 0.0 and 1.0, + inclusive. A higher value means greater confidence in the + returned value. For + + [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + of FLOAT64 data type the score is not populated. + prediction_interval (google.cloud.automl_v1beta1.types.DoubleRange): + Output only. 
Only populated when + + [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + has FLOAT64 data type. An interval in which the exactly + correct target value has 95% chance to be in. + value (google.protobuf.struct_pb2.Value): + The predicted value of the row's + + [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]. + The value depends on the column's DataType: + + - CATEGORY - the predicted (with the above confidence + ``score``) CATEGORY value. + + - FLOAT64 - the predicted (with above + ``prediction_interval``) FLOAT64 value. + tables_model_column_info (MutableSequence[google.cloud.automl_v1beta1.types.TablesModelColumnInfo]): + Output only. Auxiliary information for each of the model's + + [input_feature_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] + with respect to this particular prediction. If no other + fields than + + [column_spec_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_spec_name] + and + + [column_display_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_display_name] + would be populated, then this whole field is not. + baseline_score (float): + Output only. Stores the prediction score for + the baseline example, which is defined as the + example with all values set to their baseline + values. This is used as part of the Sampled + Shapley explanation of the model's prediction. + This field is populated only when feature + importance is requested. For regression models, + this holds the baseline prediction for the + baseline example. For classification models, + this holds the baseline prediction for the + baseline example for the argmax class. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + ) + prediction_interval: ranges.DoubleRange = proto.Field( + proto.MESSAGE, + number=4, + message=ranges.DoubleRange, + ) + value: struct_pb2.Value = proto.Field( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + tables_model_column_info: MutableSequence['TablesModelColumnInfo'] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='TablesModelColumnInfo', + ) + baseline_score: float = proto.Field( + proto.FLOAT, + number=5, + ) + + +class TablesModelColumnInfo(proto.Message): + r"""An information specific to given column and Tables Model, in + context of the Model and the predictions created by it. + + Attributes: + column_spec_name (str): + Output only. The name of the ColumnSpec + describing the column. Not populated when this + proto is outputted to BigQuery. + column_display_name (str): + Output only. The display name of the column (same as the + display_name of its ColumnSpec). + feature_importance (float): + Output only. When given as part of a Model (always + populated): Measurement of how much model predictions + correctness on the TEST data depend on values in this + column. A value between 0 and 1, higher means higher + influence. These values are normalized - for all input + feature columns of a given model they add to 1. + + When given back by Predict (populated iff + [feature_importance + param][google.cloud.automl.v1beta1.PredictRequest.params] is + set) or Batch Predict (populated iff + [feature_importance][google.cloud.automl.v1beta1.PredictRequest.params] + param is set): Measurement of how impactful for the + prediction returned for the given row the value in this + column was. 
Specifically, the feature importance specifies + the marginal contribution that the feature made to the + prediction score compared to the baseline score. These + values are computed using the Sampled Shapley method. + """ + + column_spec_name: str = proto.Field( + proto.STRING, + number=1, + ) + column_display_name: str = proto.Field( + proto.STRING, + number=2, + ) + feature_importance: float = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/temporal.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/temporal.py new file mode 100644 index 00000000..7bb71eb5 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/temporal.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'TimeSegment', + }, +) + + +class TimeSegment(proto.Message): + r"""A time period inside of an example that has a time dimension + (e.g. video). + + Attributes: + start_time_offset (google.protobuf.duration_pb2.Duration): + Start of the time segment (inclusive), + represented as the duration since the example + start. + end_time_offset (google.protobuf.duration_pb2.Duration): + End of the time segment (exclusive), + represented as the duration since the example + start. + """ + + start_time_offset: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + end_time_offset: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text.py new file mode 100644 index 00000000..9a59bf8d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import classification + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'TextClassificationDatasetMetadata', + 'TextClassificationModelMetadata', + 'TextExtractionDatasetMetadata', + 'TextExtractionModelMetadata', + 'TextSentimentDatasetMetadata', + 'TextSentimentModelMetadata', + }, +) + + +class TextClassificationDatasetMetadata(proto.Message): + r"""Dataset metadata for classification. + + Attributes: + classification_type (google.cloud.automl_v1beta1.types.ClassificationType): + Required. Type of the classification problem. + """ + + classification_type: classification.ClassificationType = proto.Field( + proto.ENUM, + number=1, + enum=classification.ClassificationType, + ) + + +class TextClassificationModelMetadata(proto.Message): + r"""Model metadata that is specific to text classification. + + Attributes: + classification_type (google.cloud.automl_v1beta1.types.ClassificationType): + Output only. Classification type of the + dataset used to train this model. + """ + + classification_type: classification.ClassificationType = proto.Field( + proto.ENUM, + number=3, + enum=classification.ClassificationType, + ) + + +class TextExtractionDatasetMetadata(proto.Message): + r"""Dataset metadata that is specific to text extraction + """ + + +class TextExtractionModelMetadata(proto.Message): + r"""Model metadata that is specific to text extraction. + + Attributes: + model_hint (str): + Indicates the scope of model use case. + + - ``default``: Use to train a general text extraction + model. Default value. + + - ``health_care``: Use to train a text extraction model + that is tuned for healthcare applications. + """ + + model_hint: str = proto.Field( + proto.STRING, + number=3, + ) + + +class TextSentimentDatasetMetadata(proto.Message): + r"""Dataset metadata for text sentiment. + + Attributes: + sentiment_max (int): + Required. A sentiment is expressed as an integer ordinal, + where higher value means a more positive sentiment. The + range of sentiments that will be used is between 0 and + sentiment_max (inclusive on both ends), and all the values + in the range must be represented in the dataset before a + model can be created. sentiment_max value must be between 1 + and 10 (inclusive). + """ + + sentiment_max: int = proto.Field( + proto.INT32, + number=1, + ) + + +class TextSentimentModelMetadata(proto.Message): + r"""Model metadata that is specific to text sentiment. + """ + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_extraction.py new file mode 100644 index 00000000..5c10ff54 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_extraction.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
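
The sentiment_max contract documented above (an integer ordinal between 1 and 10 that every training example must stay within) is easiest to see in a create-dataset call. A minimal sketch, assuming the generated v1beta1 AutoMlClient and placeholder project/location values, in the same style as the generated CreateDataset samples later in this patch:

from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()

# Placeholder display name and parent resource.
dataset = automl_v1beta1.Dataset(display_name="reviews_sentiment")
dataset.text_sentiment_dataset_metadata.sentiment_max = 4  # allows sentiments 0..4

request = automl_v1beta1.CreateDatasetRequest(
    parent="projects/123/locations/us-central1",
    dataset=dataset,
)

# Per the v1beta1 samples in this patch, CreateDataset returns the Dataset directly.
response = client.create_dataset(request=request)
print(response.name)
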
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import text_segment as gca_text_segment + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'TextExtractionAnnotation', + 'TextExtractionEvaluationMetrics', + }, +) + + +class TextExtractionAnnotation(proto.Message): + r"""Annotation for identifying spans of text. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + text_segment (google.cloud.automl_v1beta1.types.TextSegment): + An entity annotation will set this, which is + the part of the original text to which the + annotation pertains. + + This field is a member of `oneof`_ ``annotation``. + score (float): + Output only. A confidence estimate between + 0.0 and 1.0. A higher value means greater + confidence in correctness of the annotation. + """ + + text_segment: gca_text_segment.TextSegment = proto.Field( + proto.MESSAGE, + number=3, + oneof='annotation', + message=gca_text_segment.TextSegment, + ) + score: float = proto.Field( + proto.FLOAT, + number=1, + ) + + +class TextExtractionEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for text extraction problems. + + Attributes: + au_prc (float): + Output only. The Area under precision recall + curve metric. + confidence_metrics_entries (MutableSequence[google.cloud.automl_v1beta1.types.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]): + Output only. Metrics that have confidence + thresholds. Precision-recall curve can be + derived from it. + """ + + class ConfidenceMetricsEntry(proto.Message): + r"""Metrics for a single confidence threshold. + + Attributes: + confidence_threshold (float): + Output only. The confidence threshold value + used to compute the metrics. Only annotations + with score of at least this threshold are + considered to be ones the model would return. + recall (float): + Output only. Recall under the given + confidence threshold. + precision (float): + Output only. Precision under the given + confidence threshold. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + """ + + confidence_threshold: float = proto.Field( + proto.FLOAT, + number=1, + ) + recall: float = proto.Field( + proto.FLOAT, + number=3, + ) + precision: float = proto.Field( + proto.FLOAT, + number=4, + ) + f1_score: float = proto.Field( + proto.FLOAT, + number=5, + ) + + au_prc: float = proto.Field( + proto.FLOAT, + number=1, + ) + confidence_metrics_entries: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=ConfidenceMetricsEntry, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_segment.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_segment.py new file mode 100644 index 00000000..86b9feb8 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_segment.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
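
The ConfidenceMetricsEntry values above are typically scanned to choose an operating threshold. The sketch below assumes the evaluation resource name is a placeholder and that the fetched ModelEvaluation carries text_extraction_evaluation_metrics; it simply picks the entry with the best F1 score.

from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()

# Placeholder model evaluation resource name.
request = automl_v1beta1.GetModelEvaluationRequest(
    name="projects/123/locations/us-central1/models/TEN456/modelEvaluations/789",
)
evaluation = client.get_model_evaluation(request=request)

metrics = evaluation.text_extraction_evaluation_metrics
# Pick the threshold that maximizes F1 over the reported entries.
best = max(metrics.confidence_metrics_entries, key=lambda e: e.f1_score)
print(best.confidence_threshold, best.precision, best.recall)
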
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'TextSegment', + }, +) + + +class TextSegment(proto.Message): + r"""A contiguous part of a text (string), assuming it has an + UTF-8 NFC encoding. + + Attributes: + content (str): + Output only. The content of the TextSegment. + start_offset (int): + Required. Zero-based character index of the + first character of the text segment (counting + characters from the beginning of the text). + end_offset (int): + Required. Zero-based character index of the first character + past the end of the text segment (counting character from + the beginning of the text). The character at the end_offset + is NOT included in the text segment. + """ + + content: str = proto.Field( + proto.STRING, + number=3, + ) + start_offset: int = proto.Field( + proto.INT64, + number=1, + ) + end_offset: int = proto.Field( + proto.INT64, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_sentiment.py new file mode 100644 index 00000000..49ac3c89 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_sentiment.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import classification + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'TextSentimentAnnotation', + 'TextSentimentEvaluationMetrics', + }, +) + + +class TextSentimentAnnotation(proto.Message): + r"""Contains annotation details specific to text sentiment. + + Attributes: + sentiment (int): + Output only. The sentiment with the semantic, as given to + the + [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData] + when populating the dataset from which the model used for + the prediction had been trained. The sentiment values are + between 0 and + Dataset.text_sentiment_dataset_metadata.sentiment_max + (inclusive), with higher value meaning more positive + sentiment. They are completely relative, i.e. 0 means least + positive sentiment and sentiment_max means the most positive + from the sentiments present in the train data. Therefore + e.g. 
if train data had only negative sentiment, then + sentiment_max, would be still negative (although least + negative). The sentiment shouldn't be confused with "score" + or "magnitude" from the previous Natural Language Sentiment + Analysis API. + """ + + sentiment: int = proto.Field( + proto.INT32, + number=1, + ) + + +class TextSentimentEvaluationMetrics(proto.Message): + r"""Model evaluation metrics for text sentiment problems. + + Attributes: + precision (float): + Output only. Precision. + recall (float): + Output only. Recall. + f1_score (float): + Output only. The harmonic mean of recall and + precision. + mean_absolute_error (float): + Output only. Mean absolute error. Only set + for the overall model evaluation, not for + evaluation of a single annotation spec. + mean_squared_error (float): + Output only. Mean squared error. Only set for + the overall model evaluation, not for evaluation + of a single annotation spec. + linear_kappa (float): + Output only. Linear weighted kappa. Only set + for the overall model evaluation, not for + evaluation of a single annotation spec. + quadratic_kappa (float): + Output only. Quadratic weighted kappa. Only + set for the overall model evaluation, not for + evaluation of a single annotation spec. + confusion_matrix (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix): + Output only. Confusion matrix of the + evaluation. Only set for the overall model + evaluation, not for evaluation of a single + annotation spec. + annotation_spec_id (MutableSequence[str]): + Output only. The annotation spec ids used for + this evaluation. Deprecated . + """ + + precision: float = proto.Field( + proto.FLOAT, + number=1, + ) + recall: float = proto.Field( + proto.FLOAT, + number=2, + ) + f1_score: float = proto.Field( + proto.FLOAT, + number=3, + ) + mean_absolute_error: float = proto.Field( + proto.FLOAT, + number=4, + ) + mean_squared_error: float = proto.Field( + proto.FLOAT, + number=5, + ) + linear_kappa: float = proto.Field( + proto.FLOAT, + number=6, + ) + quadratic_kappa: float = proto.Field( + proto.FLOAT, + number=7, + ) + confusion_matrix: classification.ClassificationEvaluationMetrics.ConfusionMatrix = proto.Field( + proto.MESSAGE, + number=8, + message=classification.ClassificationEvaluationMetrics.ConfusionMatrix, + ) + annotation_spec_id: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=9, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/translation.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/translation.py new file mode 100644 index 00000000..3a0ed0a3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/translation.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.automl_v1beta1.types import data_items + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'TranslationDatasetMetadata', + 'TranslationEvaluationMetrics', + 'TranslationModelMetadata', + 'TranslationAnnotation', + }, +) + + +class TranslationDatasetMetadata(proto.Message): + r"""Dataset metadata that is specific to translation. + + Attributes: + source_language_code (str): + Required. The BCP-47 language code of the + source language. + target_language_code (str): + Required. The BCP-47 language code of the + target language. + """ + + source_language_code: str = proto.Field( + proto.STRING, + number=1, + ) + target_language_code: str = proto.Field( + proto.STRING, + number=2, + ) + + +class TranslationEvaluationMetrics(proto.Message): + r"""Evaluation metrics for the dataset. + + Attributes: + bleu_score (float): + Output only. BLEU score. + base_bleu_score (float): + Output only. BLEU score for base model. + """ + + bleu_score: float = proto.Field( + proto.DOUBLE, + number=1, + ) + base_bleu_score: float = proto.Field( + proto.DOUBLE, + number=2, + ) + + +class TranslationModelMetadata(proto.Message): + r"""Model metadata that is specific to translation. + + Attributes: + base_model (str): + The resource name of the model to use as a baseline to train + the custom model. If unset, we use the default base model + provided by Google Translate. Format: + ``projects/{project_id}/locations/{location_id}/models/{model_id}`` + source_language_code (str): + Output only. Inferred from the dataset. + The source languge (The BCP-47 language code) + that is used for training. + target_language_code (str): + Output only. The target languge (The BCP-47 + language code) that is used for training. + """ + + base_model: str = proto.Field( + proto.STRING, + number=1, + ) + source_language_code: str = proto.Field( + proto.STRING, + number=2, + ) + target_language_code: str = proto.Field( + proto.STRING, + number=3, + ) + + +class TranslationAnnotation(proto.Message): + r"""Annotation details specific to translation. + + Attributes: + translated_content (google.cloud.automl_v1beta1.types.TextSnippet): + Output only . The translated content. + """ + + translated_content: data_items.TextSnippet = proto.Field( + proto.MESSAGE, + number=1, + message=data_items.TextSnippet, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/video.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/video.py new file mode 100644 index 00000000..bbb43723 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/video.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.automl.v1beta1', + manifest={ + 'VideoClassificationDatasetMetadata', + 'VideoObjectTrackingDatasetMetadata', + 'VideoClassificationModelMetadata', + 'VideoObjectTrackingModelMetadata', + }, +) + + +class VideoClassificationDatasetMetadata(proto.Message): + r"""Dataset metadata specific to video classification. + All Video Classification datasets are treated as multi label. + + """ + + +class VideoObjectTrackingDatasetMetadata(proto.Message): + r"""Dataset metadata specific to video object tracking. + """ + + +class VideoClassificationModelMetadata(proto.Message): + r"""Model metadata specific to video classification. + """ + + +class VideoObjectTrackingModelMetadata(proto.Message): + r"""Model metadata specific to video object tracking. + """ + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/mypy.ini b/owl-bot-staging/v1beta1/mypy.ini new file mode 100644 index 00000000..574c5aed --- /dev/null +++ b/owl-bot-staging/v1beta1/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.7 +namespace_packages = True diff --git a/owl-bot-staging/v1beta1/noxfile.py b/owl-bot-staging/v1beta1/noxfile.py new file mode 100644 index 00000000..9b389cd8 --- /dev/null +++ b/owl-bot-staging/v1beta1/noxfile.py @@ -0,0 +1,184 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +ALL_PYTHON = [ + "3.7", + "3.8", + "3.9", + "3.10", + "3.11", +] + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + +BLACK_VERSION = "black==22.3.0" +BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"] +DEFAULT_PYTHON_VERSION = "3.11" + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds" + # exclude update_lower_bounds from default + "docs", + "blacken", + "lint", + "lint_setup_py", +] + +@nox.session(python=ALL_PYTHON) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/automl_v1beta1/', + '--cov=tests/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. 
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=ALL_PYTHON) +def mypy(session): + """Run the type checker.""" + session.install( + 'mypy', + 'types-requests', + 'types-protobuf' + ) + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx==7.0.1", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. + """ + session.install("flake8", BLACK_VERSION) + session.run( + "black", + "--check", + *BLACK_PATHS, + ) + session.run("flake8", "google", "tests", "samples") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def blacken(session): + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + session.run( + "black", + *BLACK_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("docutils", "pygments") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_async.py new file mode 100644 index 00000000..dec9fb5f --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_CreateDataset_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_create_dataset(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + dataset = automl_v1beta1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1beta1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + response = await client.create_dataset(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_CreateDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_sync.py new file mode 100644 index 00000000..d15ed0ab --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_CreateDataset_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_create_dataset(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + dataset = automl_v1beta1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1beta1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + response = client.create_dataset(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_CreateDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_async.py new file mode 100644 index 00000000..3e252a04 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_CreateModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_create_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.CreateModelRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_CreateModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_sync.py new file mode 100644 index 00000000..b4d792e7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_CreateModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_create_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.CreateModelRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_CreateModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_async.py new file mode 100644 index 00000000..7b7217f7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_DeleteDataset_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_delete_dataset(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_DeleteDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_sync.py new file mode 100644 index 00000000..67357242 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_DeleteDataset_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_delete_dataset(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_DeleteDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_async.py new file mode 100644 index 00000000..4af55e71 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_DeleteModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_delete_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_DeleteModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_sync.py new file mode 100644 index 00000000..68e36405 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_DeleteModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_delete_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_DeleteModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_async.py new file mode 100644 index 00000000..90fb4554 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_DeployModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_deploy_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_DeployModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_sync.py new file mode 100644 index 00000000..e9d2baa8 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_DeployModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_deploy_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.DeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_DeployModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_async.py new file mode 100644 index 00000000..5ea48ce9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ExportData_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_export_data(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportDataRequest( + name="name_value", + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_ExportData_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_sync.py new file mode 100644 index 00000000..45a22a7d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ExportData_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_export_data(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportDataRequest( + name="name_value", + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_ExportData_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py new file mode 100644 index 00000000..64a7f787 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportEvaluatedExamples +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_export_evaluated_examples(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportEvaluatedExamplesRequest( + name="name_value", + ) + + # Make the request + operation = client.export_evaluated_examples(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py new file mode 100644 index 00000000..dbb68da9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportEvaluatedExamples +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_export_evaluated_examples(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportEvaluatedExamplesRequest( + name="name_value", + ) + + # Make the request + operation = client.export_evaluated_examples(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_async.py new file mode 100644 index 00000000..921c44d2 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ExportModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_export_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_ExportModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_sync.py new file mode 100644 index 00000000..a20bb922 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ExportModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_export_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_ExportModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py new file mode 100644 index 00000000..2d14135a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAnnotationSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetAnnotationSpec_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_get_annotation_spec(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_annotation_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetAnnotationSpec_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py new file mode 100644 index 00000000..ba2b38b7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAnnotationSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetAnnotationSpec_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_get_annotation_spec(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_annotation_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetAnnotationSpec_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_async.py new file mode 100644 index 00000000..03fbf1cc --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetColumnSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetColumnSpec_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_get_column_spec(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetColumnSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_column_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetColumnSpec_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_sync.py new file mode 100644 index 00000000..78d344d1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetColumnSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetColumnSpec_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_get_column_spec(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetColumnSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_column_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetColumnSpec_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_async.py new file mode 100644 index 00000000..279656f8 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetDataset_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_get_dataset(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = await client.get_dataset(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_sync.py new file mode 100644 index 00000000..dc179f40 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetDataset_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_get_dataset(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = client.get_dataset(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_async.py new file mode 100644 index 00000000..9c863417 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_get_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py new file mode 100644 index 00000000..2c04957b --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetModelEvaluation_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_get_model_evaluation(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetModelEvaluation_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py new file mode 100644 index 00000000..bcbc2444 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetModelEvaluation_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_get_model_evaluation(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetModelEvaluation_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_sync.py new file mode 100644 index 00000000..69cad83d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_get_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_async.py new file mode 100644 index 00000000..41251c76 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTableSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetTableSpec_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_get_table_spec(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetTableSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_table_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetTableSpec_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_sync.py new file mode 100644 index 00000000..954dad4d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTableSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_GetTableSpec_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_get_table_spec(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.GetTableSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_table_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_GetTableSpec_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_async.py new file mode 100644 index 00000000..79eb9a13 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ImportData_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_import_data(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ImportDataRequest( + name="name_value", + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_ImportData_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_sync.py new file mode 100644 index 00000000..f52edc81 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ImportData_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_import_data(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ImportDataRequest( + name="name_value", + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_ImportData_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_async.py new file mode 100644 index 00000000..58a7edce --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListColumnSpecs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ListColumnSpecs_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_list_column_specs(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListColumnSpecsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_column_specs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END automl_v1beta1_generated_AutoMl_ListColumnSpecs_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_sync.py new file mode 100644 index 00000000..19b9e9b9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListColumnSpecs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ListColumnSpecs_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_list_column_specs(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListColumnSpecsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_column_specs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END automl_v1beta1_generated_AutoMl_ListColumnSpecs_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_async.py new file mode 100644 index 00000000..ceacfb9d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatasets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ListDatasets_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_list_datasets(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END automl_v1beta1_generated_AutoMl_ListDatasets_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_sync.py new file mode 100644 index 00000000..4cbdd955 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatasets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ListDatasets_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_list_datasets(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END automl_v1beta1_generated_AutoMl_ListDatasets_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py new file mode 100644 index 00000000..8a7f4acb --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ListModelEvaluations_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_list_model_evaluations(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END automl_v1beta1_generated_AutoMl_ListModelEvaluations_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py new file mode 100644 index 00000000..63bda4b3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ListModelEvaluations_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_list_model_evaluations(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END automl_v1beta1_generated_AutoMl_ListModelEvaluations_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_async.py new file mode 100644 index 00000000..6db13d63 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ListModels_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_list_models(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END automl_v1beta1_generated_AutoMl_ListModels_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_sync.py new file mode 100644 index 00000000..16dad314 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ListModels_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_list_models(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END automl_v1beta1_generated_AutoMl_ListModels_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_async.py new file mode 100644 index 00000000..56e95afc --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTableSpecs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ListTableSpecs_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_list_table_specs(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListTableSpecsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_table_specs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END automl_v1beta1_generated_AutoMl_ListTableSpecs_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_sync.py new file mode 100644 index 00000000..22849c8f --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTableSpecs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_ListTableSpecs_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_list_table_specs(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.ListTableSpecsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_table_specs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END automl_v1beta1_generated_AutoMl_ListTableSpecs_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_async.py new file mode 100644 index 00000000..be11ea61 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_UndeployModel_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_undeploy_model(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.UndeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_UndeployModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_sync.py new file mode 100644 index 00000000..6b249d5c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_UndeployModel_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_undeploy_model(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.UndeployModelRequest( + name="name_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_UndeployModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_async.py new file mode 100644 index 00000000..d0b8afcf --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateColumnSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_UpdateColumnSpec_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_update_column_spec(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.UpdateColumnSpecRequest( + ) + + # Make the request + response = await client.update_column_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_UpdateColumnSpec_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_sync.py new file mode 100644 index 00000000..dc2b8e01 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateColumnSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_UpdateColumnSpec_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_update_column_spec(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.UpdateColumnSpecRequest( + ) + + # Make the request + response = client.update_column_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_UpdateColumnSpec_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_async.py new file mode 100644 index 00000000..34c842e5 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_UpdateDataset_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_update_dataset(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + dataset = automl_v1beta1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1beta1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = await client.update_dataset(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_UpdateDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_sync.py new file mode 100644 index 00000000..90e1a4e3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_UpdateDataset_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_update_dataset(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + dataset = automl_v1beta1.Dataset() + dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" + dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" + + request = automl_v1beta1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = client.update_dataset(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_UpdateDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_async.py new file mode 100644 index 00000000..a4b47b6c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTableSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_UpdateTableSpec_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_update_table_spec(): + # Create a client + client = automl_v1beta1.AutoMlAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.UpdateTableSpecRequest( + ) + + # Make the request + response = await client.update_table_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_UpdateTableSpec_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_sync.py new file mode 100644 index 00000000..7fb68361 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTableSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_AutoMl_UpdateTableSpec_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_update_table_spec(): + # Create a client + client = automl_v1beta1.AutoMlClient() + + # Initialize request argument(s) + request = automl_v1beta1.UpdateTableSpecRequest( + ) + + # Make the request + response = client.update_table_spec(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_AutoMl_UpdateTableSpec_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_async.py new file mode 100644 index 00000000..8526a988 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchPredict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_PredictionService_BatchPredict_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_batch_predict(): + # Create a client + client = automl_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + request = automl_v1beta1.BatchPredictRequest( + name="name_value", + ) + + # Make the request + operation = client.batch_predict(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_PredictionService_BatchPredict_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_sync.py new file mode 100644 index 00000000..2011d22c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchPredict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_PredictionService_BatchPredict_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +def sample_batch_predict(): + # Create a client + client = automl_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + request = automl_v1beta1.BatchPredictRequest( + name="name_value", + ) + + # Make the request + operation = client.batch_predict(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_PredictionService_BatchPredict_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_async.py new file mode 100644 index 00000000..e72ceed8 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Predict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_PredictionService_Predict_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import automl_v1beta1 + + +async def sample_predict(): + # Create a client + client = automl_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + payload = automl_v1beta1.ExamplePayload() + payload.image.image_bytes = b'image_bytes_blob' + + request = automl_v1beta1.PredictRequest( + name="name_value", + payload=payload, + ) + + # Make the request + response = await client.predict(request=request) + + # Handle the response + print(response) + +# [END automl_v1beta1_generated_PredictionService_Predict_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_sync.py new file mode 100644 index 00000000..903c6344 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Predict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-automl + + +# [START automl_v1beta1_generated_PredictionService_Predict_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import automl_v1beta1
+
+
+def sample_predict():
+    # Create a client
+    client = automl_v1beta1.PredictionServiceClient()
+
+    # Initialize request argument(s)
+    payload = automl_v1beta1.ExamplePayload()
+    payload.image.image_bytes = b'image_bytes_blob'
+
+    request = automl_v1beta1.PredictRequest(
+        name="name_value",
+        payload=payload,
+    )
+
+    # Make the request
+    response = client.predict(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END automl_v1beta1_generated_PredictionService_Predict_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json b/owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json
new file mode 100644
index 00000000..70c04874
--- /dev/null
+++ b/owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json
@@ -0,0 +1,4289 @@
+{
+    "clientLibrary": {
+        "apis": [
+            {
+                "id": "google.cloud.automl.v1beta1",
+                "version": "v1beta1"
+            }
+        ],
+        "language": "PYTHON",
+        "name": "google-cloud-automl",
+        "version": "0.1.0"
+    },
+    "snippets": [
+        {
+            "canonical": true,
+            "clientMethod": {
+                "async": true,
+                "client": {
+                    "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient",
+                    "shortName": "AutoMlAsyncClient"
+                },
+                "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.create_dataset",
+                "method": {
+                    "fullName": "google.cloud.automl.v1beta1.AutoMl.CreateDataset",
+                    "service": {
+                        "fullName": "google.cloud.automl.v1beta1.AutoMl",
+                        "shortName": "AutoMl"
+                    },
+                    "shortName": "CreateDataset"
+                },
+                "parameters": [
+                    {
+                        "name": "request",
+                        "type": "google.cloud.automl_v1beta1.types.CreateDatasetRequest"
+                    },
+                    {
+                        "name": "parent",
+                        "type": "str"
+                    },
+                    {
+                        "name": "dataset",
+                        "type": "google.cloud.automl_v1beta1.types.Dataset"
+                    },
+                    {
+                        "name": "retry",
+                        "type": "google.api_core.retry.Retry"
+                    },
+                    {
+                        "name": "timeout",
+                        "type": "float"
+                    },
+                    {
+                        "name": "metadata",
+                        "type": "Sequence[Tuple[str, str]"
+                    }
+                ],
+                "resultType": "google.cloud.automl_v1beta1.types.Dataset",
+                "shortName": "create_dataset"
+            },
+            "description": "Sample for CreateDataset",
+            "file": "automl_v1beta1_generated_auto_ml_create_dataset_async.py",
+            "language": "PYTHON",
+            "origin": "API_DEFINITION",
+            "regionTag": "automl_v1beta1_generated_AutoMl_CreateDataset_async",
+            "segments": [
+                {
+                    "end": 56,
+                    "start": 27,
+                    "type": "FULL"
+                },
+                {
+                    "end": 56,
+                    "start": 27,
+                    "type": "SHORT"
+                },
+                {
+                    "end": 40,
+                    "start": 38,
+                    "type": "CLIENT_INITIALIZATION"
+                },
+                {
+                    "end": 50,
+                    "start": 41,
+                    "type": "REQUEST_INITIALIZATION"
+                },
+                {
+                    "end": 53,
+                    "start": 51,
+                    "type": "REQUEST_EXECUTION"
+                },
+                {
+                    "end": 57,
+                    "start": 54,
+                    "type": "RESPONSE_HANDLING"
+                }
+            ],
+            "title": "automl_v1beta1_generated_auto_ml_create_dataset_async.py"
+        },
+        {
+            "canonical": true,
+            "clientMethod": {
+                "client": {
+                    "fullName": "google.cloud.automl_v1beta1.AutoMlClient",
+                    "shortName": "AutoMlClient"
+                },
+                "fullName": "google.cloud.automl_v1beta1.AutoMlClient.create_dataset",
+                "method": {
+                    "fullName": "google.cloud.automl.v1beta1.AutoMl.CreateDataset",
+                    "service": {
+                        "fullName": "google.cloud.automl.v1beta1.AutoMl",
+                        "shortName": "AutoMl"
+                    },
+                    "shortName": "CreateDataset"
+                },
+                "parameters": [
+                    {
+                        "name": "request",
+                        "type":
"google.cloud.automl_v1beta1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": "google.cloud.automl_v1beta1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.Dataset", + "shortName": "create_dataset" + }, + "description": "Sample for CreateDataset", + "file": "automl_v1beta1_generated_auto_ml_create_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_CreateDataset_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_create_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.create_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.CreateModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "CreateModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.CreateModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.automl_v1beta1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_model" + }, + "description": "Sample for CreateModel", + "file": "automl_v1beta1_generated_auto_ml_create_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_CreateModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_create_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.create_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.CreateModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "CreateModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.CreateModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": 
"google.cloud.automl_v1beta1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_model" + }, + "description": "Sample for CreateModel", + "file": "automl_v1beta1_generated_auto_ml_create_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_CreateModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_create_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.delete_dataset", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.DeleteDataset", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeleteDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_dataset" + }, + "description": "Sample for DeleteDataset", + "file": "automl_v1beta1_generated_auto_ml_delete_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_DeleteDataset_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_delete_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.delete_dataset", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.DeleteDataset", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeleteDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_dataset" + }, + 
"description": "Sample for DeleteDataset", + "file": "automl_v1beta1_generated_auto_ml_delete_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_DeleteDataset_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_delete_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.delete_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.DeleteModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model" + }, + "description": "Sample for DeleteModel", + "file": "automl_v1beta1_generated_auto_ml_delete_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_DeleteModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_delete_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.delete_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.DeleteModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model" + }, + "description": "Sample for DeleteModel", + "file": "automl_v1beta1_generated_auto_ml_delete_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_DeleteModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + 
}, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_delete_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.deploy_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.DeployModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.DeployModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_model" + }, + "description": "Sample for DeployModel", + "file": "automl_v1beta1_generated_auto_ml_deploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_DeployModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_deploy_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.deploy_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.DeployModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "DeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.DeployModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "deploy_model" + }, + "description": "Sample for DeployModel", + "file": "automl_v1beta1_generated_auto_ml_deploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_DeployModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_deploy_model_sync.py" + }, + { 
+ "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.export_data", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportData", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ExportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1beta1.types.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_data" + }, + "description": "Sample for ExportData", + "file": "automl_v1beta1_generated_auto_ml_export_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ExportData_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_export_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.export_data", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportData", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ExportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1beta1.types.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_data" + }, + "description": "Sample for ExportData", + "file": "automl_v1beta1_generated_auto_ml_export_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ExportData_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_export_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": 
"google.cloud.automl_v1beta1.AutoMlAsyncClient.export_evaluated_examples", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ExportEvaluatedExamples" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_evaluated_examples" + }, + "description": "Sample for ExportEvaluatedExamples", + "file": "automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.export_evaluated_examples", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ExportEvaluatedExamples" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_evaluated_examples" + }, + "description": "Sample for ExportEvaluatedExamples", + "file": "automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.export_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1beta1.types.ModelExportOutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_model" + }, + "description": "Sample for ExportModel", + "file": "automl_v1beta1_generated_auto_ml_export_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ExportModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_export_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.export_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1beta1.types.ModelExportOutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_model" + }, + "description": "Sample for ExportModel", + "file": "automl_v1beta1_generated_auto_ml_export_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ExportModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_export_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": 
"google.cloud.automl_v1beta1.AutoMlAsyncClient.get_annotation_spec", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetAnnotationSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.AnnotationSpec", + "shortName": "get_annotation_spec" + }, + "description": "Sample for GetAnnotationSpec", + "file": "automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetAnnotationSpec_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_annotation_spec", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetAnnotationSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.AnnotationSpec", + "shortName": "get_annotation_spec" + }, + "description": "Sample for GetAnnotationSpec", + "file": "automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetAnnotationSpec_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.get_column_spec", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetColumnSpec", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": 
"AutoMl" + }, + "shortName": "GetColumnSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetColumnSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", + "shortName": "get_column_spec" + }, + "description": "Sample for GetColumnSpec", + "file": "automl_v1beta1_generated_auto_ml_get_column_spec_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetColumnSpec_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_column_spec_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_column_spec", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetColumnSpec", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetColumnSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetColumnSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", + "shortName": "get_column_spec" + }, + "description": "Sample for GetColumnSpec", + "file": "automl_v1beta1_generated_auto_ml_get_column_spec_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetColumnSpec_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_column_spec_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.get_dataset", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetDataset", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + 
}, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.Dataset", + "shortName": "get_dataset" + }, + "description": "Sample for GetDataset", + "file": "automl_v1beta1_generated_auto_ml_get_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetDataset_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_dataset", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetDataset", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.Dataset", + "shortName": "get_dataset" + }, + "description": "Sample for GetDataset", + "file": "automl_v1beta1_generated_auto_ml_get_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetDataset_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.get_model_evaluation", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.ModelEvaluation", + "shortName": "get_model_evaluation" + }, + "description": "Sample for GetModelEvaluation", + "file": "automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py", + "language": "PYTHON", + 
"origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetModelEvaluation_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_model_evaluation", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.ModelEvaluation", + "shortName": "get_model_evaluation" + }, + "description": "Sample for GetModelEvaluation", + "file": "automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetModelEvaluation_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.get_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "automl_v1beta1_generated_auto_ml_get_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetModel_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { 
+ "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "automl_v1beta1_generated_auto_ml_get_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetModel_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.get_table_spec", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetTableSpec", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetTableSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetTableSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.TableSpec", + "shortName": "get_table_spec" + }, + "description": "Sample for GetTableSpec", + "file": "automl_v1beta1_generated_auto_ml_get_table_spec_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetTableSpec_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_table_spec_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_table_spec", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.GetTableSpec", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "GetTableSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.GetTableSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.TableSpec", + "shortName": "get_table_spec" + }, + "description": "Sample for GetTableSpec", + "file": "automl_v1beta1_generated_auto_ml_get_table_spec_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_GetTableSpec_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_get_table_spec_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.import_data", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ImportData", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ImportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "input_config", + "type": "google.cloud.automl_v1beta1.types.InputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_data" + }, + "description": "Sample for ImportData", + "file": "automl_v1beta1_generated_auto_ml_import_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ImportData_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_import_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.import_data", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ImportData", + "service": { + "fullName": 
"google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ImportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "input_config", + "type": "google.cloud.automl_v1beta1.types.InputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "import_data" + }, + "description": "Sample for ImportData", + "file": "automl_v1beta1_generated_auto_ml_import_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ImportData_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_import_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.list_column_specs", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListColumnSpecs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ListColumnSpecsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsAsyncPager", + "shortName": "list_column_specs" + }, + "description": "Sample for ListColumnSpecs", + "file": "automl_v1beta1_generated_auto_ml_list_column_specs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ListColumnSpecs_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_list_column_specs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.list_column_specs", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListColumnSpecs" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.automl_v1beta1.types.ListColumnSpecsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsPager", + "shortName": "list_column_specs" + }, + "description": "Sample for ListColumnSpecs", + "file": "automl_v1beta1_generated_auto_ml_list_column_specs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ListColumnSpecs_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_list_column_specs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.list_datasets", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ListDatasets", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListDatasets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsAsyncPager", + "shortName": "list_datasets" + }, + "description": "Sample for ListDatasets", + "file": "automl_v1beta1_generated_auto_ml_list_datasets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ListDatasets_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_list_datasets_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.list_datasets", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ListDatasets", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListDatasets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + 
"type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsPager", + "shortName": "list_datasets" + }, + "description": "Sample for ListDatasets", + "file": "automl_v1beta1_generated_auto_ml_list_datasets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ListDatasets_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_list_datasets_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ListModelEvaluations_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsPager", + "shortName": 
"list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ListModelEvaluations_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.list_models", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ListModels", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsAsyncPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "automl_v1beta1_generated_auto_ml_list_models_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ListModels_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_list_models_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.list_models", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ListModels", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "automl_v1beta1_generated_auto_ml_list_models_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ListModels_sync", + "segments": [ 
+ { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_list_models_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.list_table_specs", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ListTableSpecs", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListTableSpecs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ListTableSpecsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsAsyncPager", + "shortName": "list_table_specs" + }, + "description": "Sample for ListTableSpecs", + "file": "automl_v1beta1_generated_auto_ml_list_table_specs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ListTableSpecs_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_list_table_specs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.list_table_specs", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.ListTableSpecs", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "ListTableSpecs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.ListTableSpecsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsPager", + "shortName": "list_table_specs" + }, + "description": "Sample for ListTableSpecs", + "file": "automl_v1beta1_generated_auto_ml_list_table_specs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_ListTableSpecs_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_list_table_specs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.undeploy_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.UndeployModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UndeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.UndeployModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_model" + }, + "description": "Sample for UndeployModel", + "file": "automl_v1beta1_generated_auto_ml_undeploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_UndeployModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_undeploy_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.undeploy_model", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.UndeployModel", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UndeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.UndeployModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_model" + }, + "description": "Sample for UndeployModel", + "file": "automl_v1beta1_generated_auto_ml_undeploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_UndeployModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_undeploy_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + 
"fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.update_column_spec", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UpdateColumnSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest" + }, + { + "name": "column_spec", + "type": "google.cloud.automl_v1beta1.types.ColumnSpec" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", + "shortName": "update_column_spec" + }, + "description": "Sample for UpdateColumnSpec", + "file": "automl_v1beta1_generated_auto_ml_update_column_spec_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_UpdateColumnSpec_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_update_column_spec_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.update_column_spec", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UpdateColumnSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest" + }, + { + "name": "column_spec", + "type": "google.cloud.automl_v1beta1.types.ColumnSpec" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", + "shortName": "update_column_spec" + }, + "description": "Sample for UpdateColumnSpec", + "file": "automl_v1beta1_generated_auto_ml_update_column_spec_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_UpdateColumnSpec_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_update_column_spec_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": 
"google.cloud.automl_v1beta1.AutoMlAsyncClient.update_dataset", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateDataset", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UpdateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": "google.cloud.automl_v1beta1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.Dataset", + "shortName": "update_dataset" + }, + "description": "Sample for UpdateDataset", + "file": "automl_v1beta1_generated_auto_ml_update_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_UpdateDataset_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_update_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.update_dataset", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateDataset", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UpdateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": "google.cloud.automl_v1beta1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.Dataset", + "shortName": "update_dataset" + }, + "description": "Sample for UpdateDataset", + "file": "automl_v1beta1_generated_auto_ml_update_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_UpdateDataset_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_update_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", + "shortName": "AutoMlAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.update_table_spec", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + 
}, + "shortName": "UpdateTableSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.UpdateTableSpecRequest" + }, + { + "name": "table_spec", + "type": "google.cloud.automl_v1beta1.types.TableSpec" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.TableSpec", + "shortName": "update_table_spec" + }, + "description": "Sample for UpdateTableSpec", + "file": "automl_v1beta1_generated_auto_ml_update_table_spec_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_UpdateTableSpec_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_update_table_spec_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.AutoMlClient", + "shortName": "AutoMlClient" + }, + "fullName": "google.cloud.automl_v1beta1.AutoMlClient.update_table_spec", + "method": { + "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec", + "service": { + "fullName": "google.cloud.automl.v1beta1.AutoMl", + "shortName": "AutoMl" + }, + "shortName": "UpdateTableSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.UpdateTableSpecRequest" + }, + { + "name": "table_spec", + "type": "google.cloud.automl_v1beta1.types.TableSpec" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.TableSpec", + "shortName": "update_table_spec" + }, + "description": "Sample for UpdateTableSpec", + "file": "automl_v1beta1_generated_auto_ml_update_table_spec_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_AutoMl_UpdateTableSpec_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_auto_ml_update_table_spec_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.PredictionServiceAsyncClient.batch_predict", + "method": { + "fullName": "google.cloud.automl.v1beta1.PredictionService.BatchPredict", + "service": { + "fullName": "google.cloud.automl.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "BatchPredict" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.automl_v1beta1.types.BatchPredictRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "input_config", + "type": "google.cloud.automl_v1beta1.types.BatchPredictInputConfig" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1beta1.types.BatchPredictOutputConfig" + }, + { + "name": "params", + "type": "MutableMapping[str, str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_predict" + }, + "description": "Sample for BatchPredict", + "file": "automl_v1beta1_generated_prediction_service_batch_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_PredictionService_BatchPredict_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_prediction_service_batch_predict_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.automl_v1beta1.PredictionServiceClient.batch_predict", + "method": { + "fullName": "google.cloud.automl.v1beta1.PredictionService.BatchPredict", + "service": { + "fullName": "google.cloud.automl.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "BatchPredict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.BatchPredictRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "input_config", + "type": "google.cloud.automl_v1beta1.types.BatchPredictInputConfig" + }, + { + "name": "output_config", + "type": "google.cloud.automl_v1beta1.types.BatchPredictOutputConfig" + }, + { + "name": "params", + "type": "MutableMapping[str, str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_predict" + }, + "description": "Sample for BatchPredict", + "file": "automl_v1beta1_generated_prediction_service_batch_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_PredictionService_BatchPredict_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_prediction_service_batch_predict_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.automl_v1beta1.PredictionServiceAsyncClient", + "shortName": 
"PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.automl_v1beta1.PredictionServiceAsyncClient.predict", + "method": { + "fullName": "google.cloud.automl.v1beta1.PredictionService.Predict", + "service": { + "fullName": "google.cloud.automl.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Predict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.PredictRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "payload", + "type": "google.cloud.automl_v1beta1.types.ExamplePayload" + }, + { + "name": "params", + "type": "MutableMapping[str, str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.PredictResponse", + "shortName": "predict" + }, + "description": "Sample for Predict", + "file": "automl_v1beta1_generated_prediction_service_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_PredictionService_Predict_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_prediction_service_predict_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.automl_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.automl_v1beta1.PredictionServiceClient.predict", + "method": { + "fullName": "google.cloud.automl.v1beta1.PredictionService.Predict", + "service": { + "fullName": "google.cloud.automl.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Predict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.automl_v1beta1.types.PredictRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "payload", + "type": "google.cloud.automl_v1beta1.types.ExamplePayload" + }, + { + "name": "params", + "type": "MutableMapping[str, str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.automl_v1beta1.types.PredictResponse", + "shortName": "predict" + }, + "description": "Sample for Predict", + "file": "automl_v1beta1_generated_prediction_service_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "automl_v1beta1_generated_PredictionService_Predict_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "automl_v1beta1_generated_prediction_service_predict_sync.py" + } + ] +} diff --git 
a/owl-bot-staging/v1beta1/scripts/fixup_automl_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_automl_v1beta1_keywords.py new file mode 100644 index 00000000..17376fc5 --- /dev/null +++ b/owl-bot-staging/v1beta1/scripts/fixup_automl_v1beta1_keywords.py @@ -0,0 +1,201 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class automlCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'batch_predict': ('name', 'input_config', 'output_config', 'params', ), + 'create_dataset': ('parent', 'dataset', ), + 'create_model': ('parent', 'model', ), + 'delete_dataset': ('name', ), + 'delete_model': ('name', ), + 'deploy_model': ('name', 'image_object_detection_model_deployment_metadata', 'image_classification_model_deployment_metadata', ), + 'export_data': ('name', 'output_config', ), + 'export_evaluated_examples': ('name', 'output_config', ), + 'export_model': ('name', 'output_config', ), + 'get_annotation_spec': ('name', ), + 'get_column_spec': ('name', 'field_mask', ), + 'get_dataset': ('name', ), + 'get_model': ('name', ), + 'get_model_evaluation': ('name', ), + 'get_table_spec': ('name', 'field_mask', ), + 'import_data': ('name', 'input_config', ), + 'list_column_specs': ('parent', 'field_mask', 'filter', 'page_size', 'page_token', ), + 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_models': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_table_specs': ('parent', 'field_mask', 'filter', 'page_size', 'page_token', ), + 'predict': ('name', 'payload', 'params', ), + 'undeploy_model': ('name', ), + 'update_column_spec': ('column_spec', 'update_mask', ), + 'update_dataset': ('dataset', 'update_mask', ), + 'update_table_spec': ('table_spec', 'update_mask', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. 
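+        # For example, given 'create_dataset': ('parent', 'dataset', ) in
+        # METHOD_TO_PARAMS above, a call such as
+        #     client.create_dataset(parent, dataset, retry=retry)
+        # is rewritten to
+        #     client.create_dataset(request={'parent': parent, 'dataset': dataset}, retry=retry)
+        # with the control parameters (retry, timeout, metadata) kept as keyword args.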
+ args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=automlCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the automl client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/setup.py b/owl-bot-staging/v1beta1/setup.py new file mode 100644 index 00000000..95b4c8d1 --- /dev/null +++ b/owl-bot-staging/v1beta1/setup.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os + +import setuptools # type: ignore + +package_root = os.path.abspath(os.path.dirname(__file__)) + +name = 'google-cloud-automl' + + +description = "Google Cloud Automl API client library" + +version = {} +with open(os.path.join(package_root, 'google/cloud/automl/gapic_version.py')) as fp: + exec(fp.read(), version) +version = version["__version__"] + +if version[0] == "0": + release_status = "Development Status :: 4 - Beta" +else: + release_status = "Development Status :: 5 - Production/Stable" + +dependencies = [ + "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", + "proto-plus >= 1.22.0, <2.0.0dev", + "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", + "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", +] +url = "https://github.com/googleapis/python-automl" + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: + readme = readme_file.read() + +packages = [ + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") +] + +namespaces = ["google", "google.cloud"] + +setuptools.setup( + name=name, + version=version, + description=description, + long_description=readme, + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + url=url, + classifiers=[ + release_status, + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + 
"Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Operating System :: OS Independent", + "Topic :: Internet", + ], + platforms="Posix; MacOS X; Windows", + packages=packages, + python_requires=">=3.7", + namespace_packages=namespaces, + install_requires=dependencies, + include_package_data=True, + zip_safe=False, +) diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.10.txt b/owl-bot-staging/v1beta1/testing/constraints-3.10.txt new file mode 100644 index 00000000..ed7f9aed --- /dev/null +++ b/owl-bot-staging/v1beta1/testing/constraints-3.10.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.11.txt b/owl-bot-staging/v1beta1/testing/constraints-3.11.txt new file mode 100644 index 00000000..ed7f9aed --- /dev/null +++ b/owl-bot-staging/v1beta1/testing/constraints-3.11.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.12.txt b/owl-bot-staging/v1beta1/testing/constraints-3.12.txt new file mode 100644 index 00000000..ed7f9aed --- /dev/null +++ b/owl-bot-staging/v1beta1/testing/constraints-3.12.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.7.txt b/owl-bot-staging/v1beta1/testing/constraints-3.7.txt new file mode 100644 index 00000000..6c44adfe --- /dev/null +++ b/owl-bot-staging/v1beta1/testing/constraints-3.7.txt @@ -0,0 +1,9 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List all library dependencies and extras in this file. +# Pin the version to the lower bound. +# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", +# Then this file should have google-cloud-foo==1.14.0 +google-api-core==1.34.0 +proto-plus==1.22.0 +protobuf==3.19.5 diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.8.txt b/owl-bot-staging/v1beta1/testing/constraints-3.8.txt new file mode 100644 index 00000000..ed7f9aed --- /dev/null +++ b/owl-bot-staging/v1beta1/testing/constraints-3.8.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.9.txt b/owl-bot-staging/v1beta1/testing/constraints-3.9.txt new file mode 100644 index 00000000..ed7f9aed --- /dev/null +++ b/owl-bot-staging/v1beta1/testing/constraints-3.9.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. 
+google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/v1beta1/tests/__init__.py b/owl-bot-staging/v1beta1/tests/__init__.py new file mode 100644 index 00000000..1b4db446 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/__init__.py b/owl-bot-staging/v1beta1/tests/unit/__init__.py new file mode 100644 index 00000000..1b4db446 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py new file mode 100644 index 00000000..1b4db446 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/__init__.py new file mode 100644 index 00000000..1b4db446 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_auto_ml.py b/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_auto_ml.py new file mode 100644 index 00000000..8248424c --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_auto_ml.py @@ -0,0 +1,14494 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.automl_v1beta1.services.auto_ml import AutoMlAsyncClient +from google.cloud.automl_v1beta1.services.auto_ml import AutoMlClient +from google.cloud.automl_v1beta1.services.auto_ml import pagers +from google.cloud.automl_v1beta1.services.auto_ml import transports +from google.cloud.automl_v1beta1.types import annotation_spec +from google.cloud.automl_v1beta1.types import classification +from google.cloud.automl_v1beta1.types import column_spec +from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec +from google.cloud.automl_v1beta1.types import data_stats +from google.cloud.automl_v1beta1.types import data_types +from google.cloud.automl_v1beta1.types import dataset +from google.cloud.automl_v1beta1.types import dataset as gca_dataset +from google.cloud.automl_v1beta1.types import detection +from google.cloud.automl_v1beta1.types import image +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import model +from google.cloud.automl_v1beta1.types import model as gca_model +from google.cloud.automl_v1beta1.types import model_evaluation +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import regression +from google.cloud.automl_v1beta1.types import service +from google.cloud.automl_v1beta1.types import table_spec +from google.cloud.automl_v1beta1.types import 
table_spec as gca_table_spec +from google.cloud.automl_v1beta1.types import tables +from google.cloud.automl_v1beta1.types import text +from google.cloud.automl_v1beta1.types import text_extraction +from google.cloud.automl_v1beta1.types import text_sentiment +from google.cloud.automl_v1beta1.types import translation +from google.cloud.automl_v1beta1.types import video +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AutoMlClient._get_default_mtls_endpoint(None) is None + assert AutoMlClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert AutoMlClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert AutoMlClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert AutoMlClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert AutoMlClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (AutoMlClient, "grpc"), + (AutoMlAsyncClient, "grpc_asyncio"), + (AutoMlClient, "rest"), +]) +def test_auto_ml_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://automl.googleapis.com' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.AutoMlGrpcTransport, "grpc"), + (transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.AutoMlRestTransport, "rest"), +]) +def test_auto_ml_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + 
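+# The test below builds clients via from_service_account_file and the equivalent
+# from_service_account_json entry point, checking that the mocked credentials are
+# attached and that the transport host matches the chosen transport
+# (automl.googleapis.com:443 for gRPC/gRPC-asyncio, https://automl.googleapis.com for REST).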
+@pytest.mark.parametrize("client_class,transport_name", [ + (AutoMlClient, "grpc"), + (AutoMlAsyncClient, "grpc_asyncio"), + (AutoMlClient, "rest"), +]) +def test_auto_ml_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://automl.googleapis.com' + ) + + +def test_auto_ml_client_get_transport_class(): + transport = AutoMlClient.get_transport_class() + available_transports = [ + transports.AutoMlGrpcTransport, + transports.AutoMlRestTransport, + ] + assert transport in available_transports + + transport = AutoMlClient.get_transport_class("grpc") + assert transport == transports.AutoMlGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + (AutoMlClient, transports.AutoMlRestTransport, "rest"), +]) +@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) +@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) +def test_auto_ml_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(AutoMlClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(AutoMlClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions(api_audience="https://language.googleapis.com") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com" + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", "true"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", "false"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", "false"), + (AutoMlClient, transports.AutoMlRestTransport, "rest", "true"), + (AutoMlClient, transports.AutoMlRestTransport, "rest", "false"), +]) +@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) 
+@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_auto_ml_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
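+    # With no explicit client_cert_source and has_default_client_cert_source
+    # patched to return False, there is no certificate to use at all, so the
+    # client must fall back to DEFAULT_ENDPOINT and pass
+    # client_cert_source_for_mtls=None regardless of what
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE is set to.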
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [ + AutoMlClient, AutoMlAsyncClient +]) +@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) +@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) +def test_auto_ml_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
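+    # For the environment-driven cases, the expected resolution in "auto" mode
+    # is roughly the following (a sketch mirroring the assertions in this test,
+    # not a quote of the client implementation; use_client_cert stands for
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE == "true"):
+    #
+    #     if use_client_cert and mtls.has_default_client_cert_source():
+    #         endpoint, source = DEFAULT_MTLS_ENDPOINT, mtls.default_client_cert_source()
+    #     else:
+    #         endpoint, source = DEFAULT_ENDPOINT, None
+    #
+    # so with a discoverable default certificate the mTLS endpoint
+    # (automl.mtls.googleapis.com) and the mocked cert source are returned.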
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), + (AutoMlClient, transports.AutoMlRestTransport, "rest"), +]) +def test_auto_ml_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", grpc_helpers), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), + (AutoMlClient, transports.AutoMlRestTransport, "rest", None), +]) +def test_auto_ml_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +def test_auto_ml_client_client_options_from_dict(): + with mock.patch('google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = AutoMlClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", grpc_helpers), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_auto_ml_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
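+    # Besides re-checking that credentials_file is forwarded to the transport
+    # constructor, this test patches google.auth.load_credentials_from_file,
+    # google.auth.default and grpc_helpers.create_channel to verify that the
+    # credentials loaded from the file (not ADC) are the ones handed to the
+    # channel, together with the cloud-platform default scope and the
+    # unlimited send/receive message-size options.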
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "automl.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="automl.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + service.CreateDatasetRequest, + dict, +]) +def test_create_dataset(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + ) + response = client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +def test_create_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
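+    # Calling client.create_dataset() with no request object and no flattened
+    # fields should still reach the stub with a default-constructed
+    # service.CreateDatasetRequest(); the assertions below only check that the
+    # mocked __call__ was invoked with that empty request.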
+ with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + client.create_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateDatasetRequest() + +@pytest.mark.asyncio +async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=service.CreateDatasetRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + )) + response = await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_create_dataset_async_from_dict(): + await test_create_dataset_async(request_type=dict) + + +def test_create_dataset_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateDatasetRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = gca_dataset.Dataset() + client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_dataset_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateDatasetRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
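+    # As in the sync variant, the routing information is expected to travel in
+    # the x-goog-request-params metadata entry (here 'parent=parent_value'),
+    # which is how implicit routing headers are conveyed on gRPC requests.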
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_dataset_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].dataset + mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) + assert arg == mock_val + + +def test_create_dataset_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_dataset( + service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + +@pytest.mark.asyncio +async def test_create_dataset_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].dataset + mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_dataset_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
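+    # The generated surface treats the request object and the flattened
+    # parent/dataset keywords as mutually exclusive, so supplying both should
+    # raise ValueError before any RPC is attempted (no stub mock is needed here).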
+ with pytest.raises(ValueError): + await client.create_dataset( + service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetDatasetRequest, + dict, +]) +def test_get_dataset(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + ) + response = client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +def test_get_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + client.get_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetDatasetRequest() + +@pytest.mark.asyncio +async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=service.GetDatasetRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + )) + response = await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetDatasetRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_dataset_async_from_dict(): + await test_get_dataset_async(request_type=dict) + + +def test_get_dataset_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = dataset.Dataset() + client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_dataset_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_dataset_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_dataset_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_dataset( + service.GetDatasetRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_dataset_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_dataset_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_dataset( + service.GetDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListDatasetsRequest, + dict, +]) +def test_list_datasets(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatasetsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_datasets_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + client.list_datasets() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListDatasetsRequest() + +@pytest.mark.asyncio +async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=service.ListDatasetsRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatasetsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_datasets_async_from_dict(): + await test_list_datasets_async(request_type=dict) + + +def test_list_datasets_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListDatasetsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + call.return_value = service.ListDatasetsResponse() + client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_datasets_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListDatasetsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse()) + await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_datasets_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_datasets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_datasets_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_datasets( + service.ListDatasetsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_datasets_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListDatasetsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_datasets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_datasets_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_datasets( + service.ListDatasetsRequest(), + parent='parent_value', + ) + + +def test_list_datasets_pager(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_datasets(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, dataset.Dataset) + for i in results) +def test_list_datasets_pages(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Set the response to a series of pages. 
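+        # side_effect feeds one ListDatasetsResponse per page request; the empty
+        # next_page_token on the final response is what stops pagination, so the
+        # trailing RuntimeError only guards against over-fetching. Iterating the
+        # pager's .pages below should therefore see tokens 'abc', 'def', 'ghi', ''.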
+ call.side_effect = ( + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + pages = list(client.list_datasets(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_datasets_async_pager(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_datasets(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, dataset.Dataset) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_datasets_async_pages(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_datasets(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + service.UpdateDatasetRequest, + dict, +]) +def test_update_dataset(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + ) + response = client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +def test_update_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + client.update_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateDatasetRequest() + +@pytest.mark.asyncio +async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=service.UpdateDatasetRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + )) + response = await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_dataset_async_from_dict(): + await test_update_dataset_async(request_type=dict) + + +def test_update_dataset_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateDatasetRequest() + + request.dataset.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = gca_dataset.Dataset() + client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_dataset_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateDatasetRequest() + + request.dataset.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=name_value', + ) in kw['metadata'] + + +def test_update_dataset_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_dataset( + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].dataset + mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) + assert arg == mock_val + + +def test_update_dataset_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_dataset( + service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + +@pytest.mark.asyncio +async def test_update_dataset_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_dataset( + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].dataset + mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_dataset_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_dataset( + service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + service.DeleteDatasetRequest, + dict, +]) +def test_delete_dataset(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + client.delete_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteDatasetRequest() + +@pytest.mark.asyncio +async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=service.DeleteDatasetRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
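+    # delete_dataset is a long-running method: the stub is faked with a raw
+    # operations_pb2.Operation wrapped in FakeUnaryUnaryCall so the transport
+    # call can be awaited, and the client is expected to hand back an operation
+    # future (asserted below only through the google.api_core future.Future
+    # interface, since the operation itself is never polled in this test).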
+ with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_dataset_async_from_dict(): + await test_delete_dataset_async(request_type=dict) + + +def test_delete_dataset_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_dataset_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_dataset_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_dataset_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_dataset( + service.DeleteDatasetRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_dataset( + service.DeleteDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ImportDataRequest, + dict, +]) +def test_import_data(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + client.import_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ImportDataRequest() + +@pytest.mark.asyncio +async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=service.ImportDataRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_data_async_from_dict(): + await test_import_data_async(request_type=dict) + + +def test_import_data_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ImportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_data_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ImportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_import_data_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
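+    # import_data flattens both a plain string field (name) and a message field
+    # (input_config); the expected value for the latter, as asserted below, is
+    #
+    #     io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value']))
+    #
+    # and the test checks that both values land on the outgoing request object.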
+ with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_data( + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].input_config + mock_val = io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) + assert arg == mock_val + + +def test_import_data_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_data( + service.ImportDataRequest(), + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + +@pytest.mark.asyncio +async def test_import_data_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_data( + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].input_config + mock_val = io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_import_data_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_data( + service.ImportDataRequest(), + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + + +@pytest.mark.parametrize("request_type", [ + service.ExportDataRequest, + dict, +]) +def test_export_data(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
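+ # ExportData is a long-running method: the stub is mocked to return a raw
+ # operations_pb2.Operation, and the test below only checks that the client
+ # surfaces it as a future.Future rather than resolving the operation.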
+ with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + client.export_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportDataRequest() + +@pytest.mark.asyncio +async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=service.ExportDataRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_data_async_from_dict(): + await test_export_data_async(request_type=dict) + + +def test_export_data_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_data_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = service.ExportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_export_data_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_data( + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + + +def test_export_data_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_data( + service.ExportDataRequest(), + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + +@pytest.mark.asyncio +async def test_export_data_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_data( + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_data_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_data( + service.ExportDataRequest(), + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetAnnotationSpecRequest, + dict, +]) +def test_get_annotation_spec(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec( + name='name_value', + display_name='display_name_value', + example_count=1396, + ) + response = client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetAnnotationSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, annotation_spec.AnnotationSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.example_count == 1396 + + +def test_get_annotation_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + client.get_annotation_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetAnnotationSpecRequest() + +@pytest.mark.asyncio +async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=service.GetAnnotationSpecRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. 
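+ # For the async client the mocked value is wrapped in
+ # grpc_helpers_async.FakeUnaryUnaryCall so that awaiting the call behaves
+ # like a real unary-unary gRPC invocation.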
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec( + name='name_value', + display_name='display_name_value', + example_count=1396, + )) + response = await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetAnnotationSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, annotation_spec.AnnotationSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.example_count == 1396 + + +@pytest.mark.asyncio +async def test_get_annotation_spec_async_from_dict(): + await test_get_annotation_spec_async(request_type=dict) + + +def test_get_annotation_spec_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetAnnotationSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + call.return_value = annotation_spec.AnnotationSpec() + client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_annotation_spec_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetAnnotationSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) + await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_annotation_spec_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_annotation_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_annotation_spec_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_annotation_spec( + service.GetAnnotationSpecRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_annotation_spec_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_annotation_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_annotation_spec_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_annotation_spec( + service.GetAnnotationSpecRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetTableSpecRequest, + dict, +]) +def test_get_table_spec(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_table_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = table_spec.TableSpec( + name='name_value', + time_column_spec_id='time_column_spec_id_value', + row_count=992, + valid_row_count=1615, + column_count=1302, + etag='etag_value', + ) + response = client.get_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetTableSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table_spec.TableSpec) + assert response.name == 'name_value' + assert response.time_column_spec_id == 'time_column_spec_id_value' + assert response.row_count == 992 + assert response.valid_row_count == 1615 + assert response.column_count == 1302 + assert response.etag == 'etag_value' + + +def test_get_table_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_table_spec), + '__call__') as call: + client.get_table_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetTableSpecRequest() + +@pytest.mark.asyncio +async def test_get_table_spec_async(transport: str = 'grpc_asyncio', request_type=service.GetTableSpecRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_table_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(table_spec.TableSpec( + name='name_value', + time_column_spec_id='time_column_spec_id_value', + row_count=992, + valid_row_count=1615, + column_count=1302, + etag='etag_value', + )) + response = await client.get_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetTableSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table_spec.TableSpec) + assert response.name == 'name_value' + assert response.time_column_spec_id == 'time_column_spec_id_value' + assert response.row_count == 992 + assert response.valid_row_count == 1615 + assert response.column_count == 1302 + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_table_spec_async_from_dict(): + await test_get_table_spec_async(request_type=dict) + + +def test_get_table_spec_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetTableSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_table_spec), + '__call__') as call: + call.return_value = table_spec.TableSpec() + client.get_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_table_spec_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetTableSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_table_spec), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table_spec.TableSpec()) + await client.get_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_table_spec_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_table_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = table_spec.TableSpec() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_table_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_table_spec_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_table_spec( + service.GetTableSpecRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_table_spec_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_table_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = table_spec.TableSpec() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table_spec.TableSpec()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_table_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_table_spec_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_table_spec( + service.GetTableSpecRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListTableSpecsRequest, + dict, +]) +def test_list_table_specs(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_table_specs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListTableSpecsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_table_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListTableSpecsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTableSpecsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_table_specs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_table_specs), + '__call__') as call: + client.list_table_specs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListTableSpecsRequest() + +@pytest.mark.asyncio +async def test_list_table_specs_async(transport: str = 'grpc_asyncio', request_type=service.ListTableSpecsRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_table_specs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListTableSpecsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_table_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListTableSpecsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTableSpecsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_table_specs_async_from_dict(): + await test_list_table_specs_async(request_type=dict) + + +def test_list_table_specs_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListTableSpecsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_table_specs), + '__call__') as call: + call.return_value = service.ListTableSpecsResponse() + client.list_table_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
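+ # Routing information travels in the call's metadata keyword argument as an
+ # ('x-goog-request-params', 'parent=parent_value') pair mirroring the URI field.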
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_table_specs_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListTableSpecsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_table_specs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListTableSpecsResponse()) + await client.list_table_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_table_specs_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_table_specs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListTableSpecsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_table_specs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_table_specs_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_table_specs( + service.ListTableSpecsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_table_specs_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_table_specs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListTableSpecsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListTableSpecsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_table_specs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_table_specs_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_table_specs( + service.ListTableSpecsRequest(), + parent='parent_value', + ) + + +def test_list_table_specs_pager(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_table_specs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + table_spec.TableSpec(), + table_spec.TableSpec(), + ], + next_page_token='abc', + ), + service.ListTableSpecsResponse( + table_specs=[], + next_page_token='def', + ), + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + ], + next_page_token='ghi', + ), + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + table_spec.TableSpec(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_table_specs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table_spec.TableSpec) + for i in results) +def test_list_table_specs_pages(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_table_specs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + table_spec.TableSpec(), + table_spec.TableSpec(), + ], + next_page_token='abc', + ), + service.ListTableSpecsResponse( + table_specs=[], + next_page_token='def', + ), + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + ], + next_page_token='ghi', + ), + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + table_spec.TableSpec(), + ], + ), + RuntimeError, + ) + pages = list(client.list_table_specs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_table_specs_async_pager(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_table_specs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
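+ # Four pages are queued (3 + 0 + 1 + 2 table specs, six in total); the trailing
+ # RuntimeError is a sentinel that only triggers if the pager asks for a page
+ # beyond the final response.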
+ call.side_effect = ( + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + table_spec.TableSpec(), + table_spec.TableSpec(), + ], + next_page_token='abc', + ), + service.ListTableSpecsResponse( + table_specs=[], + next_page_token='def', + ), + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + ], + next_page_token='ghi', + ), + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + table_spec.TableSpec(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_table_specs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, table_spec.TableSpec) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_table_specs_async_pages(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_table_specs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + table_spec.TableSpec(), + table_spec.TableSpec(), + ], + next_page_token='abc', + ), + service.ListTableSpecsResponse( + table_specs=[], + next_page_token='def', + ), + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + ], + next_page_token='ghi', + ), + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + table_spec.TableSpec(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_table_specs(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + service.UpdateTableSpecRequest, + dict, +]) +def test_update_table_spec(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_table_spec.TableSpec( + name='name_value', + time_column_spec_id='time_column_spec_id_value', + row_count=992, + valid_row_count=1615, + column_count=1302, + etag='etag_value', + ) + response = client.update_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateTableSpecRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_table_spec.TableSpec) + assert response.name == 'name_value' + assert response.time_column_spec_id == 'time_column_spec_id_value' + assert response.row_count == 992 + assert response.valid_row_count == 1615 + assert response.column_count == 1302 + assert response.etag == 'etag_value' + + +def test_update_table_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table_spec), + '__call__') as call: + client.update_table_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateTableSpecRequest() + +@pytest.mark.asyncio +async def test_update_table_spec_async(transport: str = 'grpc_asyncio', request_type=service.UpdateTableSpecRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_table_spec.TableSpec( + name='name_value', + time_column_spec_id='time_column_spec_id_value', + row_count=992, + valid_row_count=1615, + column_count=1302, + etag='etag_value', + )) + response = await client.update_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateTableSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_table_spec.TableSpec) + assert response.name == 'name_value' + assert response.time_column_spec_id == 'time_column_spec_id_value' + assert response.row_count == 992 + assert response.valid_row_count == 1615 + assert response.column_count == 1302 + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_table_spec_async_from_dict(): + await test_update_table_spec_async(request_type=dict) + + +def test_update_table_spec_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateTableSpecRequest() + + request.table_spec.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table_spec), + '__call__') as call: + call.return_value = gca_table_spec.TableSpec() + client.update_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
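+ # For a nested resource field the routing header uses the full field path,
+ # here table_spec.name, as asserted below.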
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'table_spec.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_table_spec_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateTableSpecRequest() + + request.table_spec.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table_spec), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_table_spec.TableSpec()) + await client.update_table_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'table_spec.name=name_value', + ) in kw['metadata'] + + +def test_update_table_spec_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_table_spec.TableSpec() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_table_spec( + table_spec=gca_table_spec.TableSpec(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].table_spec + mock_val = gca_table_spec.TableSpec(name='name_value') + assert arg == mock_val + + +def test_update_table_spec_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_table_spec( + service.UpdateTableSpecRequest(), + table_spec=gca_table_spec.TableSpec(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_update_table_spec_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_table_spec.TableSpec() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_table_spec.TableSpec()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_table_spec( + table_spec=gca_table_spec.TableSpec(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].table_spec + mock_val = gca_table_spec.TableSpec(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_table_spec_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_table_spec( + service.UpdateTableSpecRequest(), + table_spec=gca_table_spec.TableSpec(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetColumnSpecRequest, + dict, +]) +def test_get_column_spec(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_column_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = column_spec.ColumnSpec( + name='name_value', + display_name='display_name_value', + etag='etag_value', + ) + response = client.get_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetColumnSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, column_spec.ColumnSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + + +def test_get_column_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_column_spec), + '__call__') as call: + client.get_column_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetColumnSpecRequest() + +@pytest.mark.asyncio +async def test_get_column_spec_async(transport: str = 'grpc_asyncio', request_type=service.GetColumnSpecRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_column_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(column_spec.ColumnSpec( + name='name_value', + display_name='display_name_value', + etag='etag_value', + )) + response = await client.get_column_spec(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetColumnSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, column_spec.ColumnSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_column_spec_async_from_dict(): + await test_get_column_spec_async(request_type=dict) + + +def test_get_column_spec_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetColumnSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_column_spec), + '__call__') as call: + call.return_value = column_spec.ColumnSpec() + client.get_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_column_spec_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetColumnSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_column_spec), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(column_spec.ColumnSpec()) + await client.get_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_column_spec_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_column_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = column_spec.ColumnSpec() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_column_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_column_spec_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_column_spec( + service.GetColumnSpecRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_column_spec_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_column_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = column_spec.ColumnSpec() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(column_spec.ColumnSpec()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_column_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_column_spec_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_column_spec( + service.GetColumnSpecRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListColumnSpecsRequest, + dict, +]) +def test_list_column_specs(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_column_specs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListColumnSpecsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_column_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListColumnSpecsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListColumnSpecsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_column_specs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_column_specs), + '__call__') as call: + client.list_column_specs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListColumnSpecsRequest() + +@pytest.mark.asyncio +async def test_list_column_specs_async(transport: str = 'grpc_asyncio', request_type=service.ListColumnSpecsRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_column_specs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListColumnSpecsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_column_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListColumnSpecsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListColumnSpecsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_column_specs_async_from_dict(): + await test_list_column_specs_async(request_type=dict) + + +def test_list_column_specs_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListColumnSpecsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_column_specs), + '__call__') as call: + call.return_value = service.ListColumnSpecsResponse() + client.list_column_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_column_specs_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListColumnSpecsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_column_specs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListColumnSpecsResponse()) + await client.list_column_specs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_column_specs_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_column_specs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListColumnSpecsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_column_specs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_column_specs_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_column_specs( + service.ListColumnSpecsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_column_specs_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_column_specs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListColumnSpecsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListColumnSpecsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_column_specs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_column_specs_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_column_specs( + service.ListColumnSpecsRequest(), + parent='parent_value', + ) + + +def test_list_column_specs_pager(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_column_specs), + '__call__') as call: + # Set the response to a series of pages. 
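+        # The canned responses below are returned by the mocked stub in order; the
+        # trailing RuntimeError is a guard that fails the test loudly if the pager
+        # ever asks for more pages than the fixture provides.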
+ call.side_effect = ( + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + ], + next_page_token='abc', + ), + service.ListColumnSpecsResponse( + column_specs=[], + next_page_token='def', + ), + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + ], + next_page_token='ghi', + ), + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_column_specs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, column_spec.ColumnSpec) + for i in results) +def test_list_column_specs_pages(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_column_specs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + ], + next_page_token='abc', + ), + service.ListColumnSpecsResponse( + column_specs=[], + next_page_token='def', + ), + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + ], + next_page_token='ghi', + ), + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + ], + ), + RuntimeError, + ) + pages = list(client.list_column_specs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_column_specs_async_pager(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_column_specs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + ], + next_page_token='abc', + ), + service.ListColumnSpecsResponse( + column_specs=[], + next_page_token='def', + ), + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + ], + next_page_token='ghi', + ), + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_column_specs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, column_spec.ColumnSpec) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_column_specs_async_pages(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
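+    # new_callable=mock.AsyncMock makes the patched stub awaitable, so the async
+    # pager can consume the canned pages configured below.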
+ with mock.patch.object( + type(client.transport.list_column_specs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + ], + next_page_token='abc', + ), + service.ListColumnSpecsResponse( + column_specs=[], + next_page_token='def', + ), + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + ], + next_page_token='ghi', + ), + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_column_specs(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + service.UpdateColumnSpecRequest, + dict, +]) +def test_update_column_spec(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_column_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_column_spec.ColumnSpec( + name='name_value', + display_name='display_name_value', + etag='etag_value', + ) + response = client.update_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateColumnSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_column_spec.ColumnSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + + +def test_update_column_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_column_spec), + '__call__') as call: + client.update_column_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateColumnSpecRequest() + +@pytest.mark.asyncio +async def test_update_column_spec_async(transport: str = 'grpc_asyncio', request_type=service.UpdateColumnSpecRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
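+    # The canned ColumnSpec is wrapped in grpc_helpers_async.FakeUnaryUnaryCall
+    # below so that awaiting the mocked RPC yields the response object.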
+ with mock.patch.object( + type(client.transport.update_column_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_column_spec.ColumnSpec( + name='name_value', + display_name='display_name_value', + etag='etag_value', + )) + response = await client.update_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.UpdateColumnSpecRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_column_spec.ColumnSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_column_spec_async_from_dict(): + await test_update_column_spec_async(request_type=dict) + + +def test_update_column_spec_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateColumnSpecRequest() + + request.column_spec.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_column_spec), + '__call__') as call: + call.return_value = gca_column_spec.ColumnSpec() + client.update_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'column_spec.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_column_spec_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UpdateColumnSpecRequest() + + request.column_spec.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_column_spec), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_column_spec.ColumnSpec()) + await client.update_column_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'column_spec.name=name_value', + ) in kw['metadata'] + + +def test_update_column_spec_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_column_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_column_spec.ColumnSpec() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_column_spec( + column_spec=gca_column_spec.ColumnSpec(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].column_spec + mock_val = gca_column_spec.ColumnSpec(name='name_value') + assert arg == mock_val + + +def test_update_column_spec_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_column_spec( + service.UpdateColumnSpecRequest(), + column_spec=gca_column_spec.ColumnSpec(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_update_column_spec_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_column_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_column_spec.ColumnSpec() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_column_spec.ColumnSpec()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_column_spec( + column_spec=gca_column_spec.ColumnSpec(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].column_spec + mock_val = gca_column_spec.ColumnSpec(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_column_spec_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_column_spec( + service.UpdateColumnSpecRequest(), + column_spec=gca_column_spec.ColumnSpec(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + service.CreateModelRequest, + dict, +]) +def test_create_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + client.create_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateModelRequest() + +@pytest.mark.asyncio +async def test_create_model_async(transport: str = 'grpc_asyncio', request_type=service.CreateModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.CreateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_model_async_from_dict(): + await test_create_model_async(request_type=dict) + + +def test_create_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateModelRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.CreateModelRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_model( + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model + mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) + assert arg == mock_val + + +def test_create_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model( + service.CreateModelRequest(), + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + +@pytest.mark.asyncio +async def test_create_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_model( + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model + mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_model( + service.CreateModelRequest(), + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetModelRequest, + dict, +]) +def test_get_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model( + name='name_value', + display_name='display_name_value', + dataset_id='dataset_id_value', + deployment_state=model.Model.DeploymentState.DEPLOYED, + ) + response = client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.dataset_id == 'dataset_id_value' + assert response.deployment_state == model.Model.DeploymentState.DEPLOYED + + +def test_get_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + client.get_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelRequest() + +@pytest.mark.asyncio +async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=service.GetModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model.Model( + name='name_value', + display_name='display_name_value', + dataset_id='dataset_id_value', + deployment_state=model.Model.DeploymentState.DEPLOYED, + )) + response = await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.dataset_id == 'dataset_id_value' + assert response.deployment_state == model.Model.DeploymentState.DEPLOYED + + +@pytest.mark.asyncio +async def test_get_model_async_from_dict(): + await test_get_model_async(request_type=dict) + + +def test_get_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + call.return_value = model.Model() + client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + service.GetModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model( + service.GetModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListModelsRequest, + dict, +]) +def test_list_models(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + client.list_models() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelsRequest() + +@pytest.mark.asyncio +async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=service.ListModelsRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_models_async_from_dict(): + await test_list_models_async(request_type=dict) + + +def test_list_models_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + call.return_value = service.ListModelsResponse() + client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_models_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse()) + await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_models_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_models( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_models_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_models( + service.ListModelsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_models_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_models( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_models_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_models( + service.ListModelsRequest(), + parent='parent_value', + ) + + +def test_list_models_pager(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + service.ListModelsResponse( + model=[], + next_page_token='def', + ), + service.ListModelsResponse( + model=[ + model.Model(), + ], + next_page_token='ghi', + ), + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_models(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) + for i in results) +def test_list_models_pages(transport_name: str = "grpc"): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + service.ListModelsResponse( + model=[], + next_page_token='def', + ), + service.ListModelsResponse( + model=[ + model.Model(), + ], + next_page_token='ghi', + ), + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = list(client.list_models(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_models_async_pager(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + service.ListModelsResponse( + model=[], + next_page_token='def', + ), + service.ListModelsResponse( + model=[ + model.Model(), + ], + next_page_token='ghi', + ), + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_models(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model.Model) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_models_async_pages(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + service.ListModelsResponse( + model=[], + next_page_token='def', + ), + service.ListModelsResponse( + model=[ + model.Model(), + ], + next_page_token='ghi', + ), + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_models(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + service.DeleteModelRequest, + dict, +]) +def test_delete_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
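+        # Long-running methods are faked with a bare Operation proto; the client is
+        # expected to wrap it in an operation future, as asserted further down.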
+ call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + client.delete_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteModelRequest() + +@pytest.mark.asyncio +async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=service.DeleteModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_async_from_dict(): + await test_delete_model_async(request_type=dict) + + +def test_delete_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeleteModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model( + service.DeleteModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_model( + service.DeleteModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.DeployModelRequest, + dict, +]) +def test_deploy_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_deploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + client.deploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeployModelRequest() + +@pytest.mark.asyncio +async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=service.DeployModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.DeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_deploy_model_async_from_dict(): + await test_deploy_model_async(request_type=dict) + + +def test_deploy_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.DeployModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_deploy_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
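+    # The resource name set on the request is what the client is expected to echo
+    # into the 'x-goog-request-params' metadata entry asserted below.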
+ request = service.DeployModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_deploy_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.deploy_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_deploy_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.deploy_model( + service.DeployModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_deploy_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.deploy_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_deploy_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.deploy_model( + service.DeployModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.UndeployModelRequest, + dict, +]) +def test_undeploy_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + client.undeploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.UndeployModelRequest() + +@pytest.mark.asyncio +async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=service.UndeployModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undeploy_model_async_from_dict(): + await test_undeploy_model_async(request_type=dict) + + +def test_undeploy_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UndeployModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_undeploy_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.UndeployModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_undeploy_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_undeploy_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undeploy_model( + service.UndeployModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.undeploy_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.undeploy_model( + service.UndeployModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ExportModelRequest, + dict, +]) +def test_export_model(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + client.export_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportModelRequest() + +@pytest.mark.asyncio +async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=service.ExportModelRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_model_async_from_dict(): + await test_export_model_async(request_type=dict) + + +def test_export_model_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_model_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_export_model_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_model( + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + + +def test_export_model_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.export_model( + service.ExportModelRequest(), + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + +@pytest.mark.asyncio +async def test_export_model_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_model( + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_model_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_model( + service.ExportModelRequest(), + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + service.ExportEvaluatedExamplesRequest, + dict, +]) +def test_export_evaluated_examples(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_evaluated_examples), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_evaluated_examples(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportEvaluatedExamplesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_evaluated_examples_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_evaluated_examples), + '__call__') as call: + client.export_evaluated_examples() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportEvaluatedExamplesRequest() + +@pytest.mark.asyncio +async def test_export_evaluated_examples_async(transport: str = 'grpc_asyncio', request_type=service.ExportEvaluatedExamplesRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_evaluated_examples), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_evaluated_examples(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ExportEvaluatedExamplesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_evaluated_examples_async_from_dict(): + await test_export_evaluated_examples_async(request_type=dict) + + +def test_export_evaluated_examples_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportEvaluatedExamplesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_evaluated_examples), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_evaluated_examples(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_evaluated_examples_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ExportEvaluatedExamplesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_evaluated_examples), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_evaluated_examples(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_export_evaluated_examples_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_evaluated_examples), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_evaluated_examples( + name='name_value', + output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')) + assert arg == mock_val + + +def test_export_evaluated_examples_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_evaluated_examples( + service.ExportEvaluatedExamplesRequest(), + name='name_value', + output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), + ) + +@pytest.mark.asyncio +async def test_export_evaluated_examples_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_evaluated_examples), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_evaluated_examples( + name='name_value', + output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_evaluated_examples_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.export_evaluated_examples( + service.ExportEvaluatedExamplesRequest(), + name='name_value', + output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetModelEvaluationRequest, + dict, +]) +def test_get_model_evaluation(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation( + name='name_value', + annotation_spec_id='annotation_spec_id_value', + display_name='display_name_value', + evaluated_example_count=2446, + ) + response = client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.annotation_spec_id == 'annotation_spec_id_value' + assert response.display_name == 'display_name_value' + assert response.evaluated_example_count == 2446 + + +def test_get_model_evaluation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + client.get_model_evaluation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelEvaluationRequest() + +@pytest.mark.asyncio +async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=service.GetModelEvaluationRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation( + name='name_value', + annotation_spec_id='annotation_spec_id_value', + display_name='display_name_value', + evaluated_example_count=2446, + )) + response = await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.GetModelEvaluationRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.annotation_spec_id == 'annotation_spec_id_value' + assert response.display_name == 'display_name_value' + assert response.evaluated_example_count == 2446 + + +@pytest.mark.asyncio +async def test_get_model_evaluation_async_from_dict(): + await test_get_model_evaluation_async(request_type=dict) + + +def test_get_model_evaluation_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelEvaluationRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + call.return_value = model_evaluation.ModelEvaluation() + client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_evaluation_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.GetModelEvaluationRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) + await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_evaluation_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_evaluation( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_evaluation_flattened_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_model_evaluation( + service.GetModelEvaluationRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_evaluation_flattened_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_evaluation( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_evaluation_flattened_error_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model_evaluation( + service.GetModelEvaluationRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListModelEvaluationsRequest, + dict, +]) +def test_list_model_evaluations(request_type, transport: str = 'grpc'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = service.ListModelEvaluationsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelEvaluationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_evaluations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + client.list_model_evaluations() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelEvaluationsRequest() + +@pytest.mark.asyncio +async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=service.ListModelEvaluationsRequest): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == service.ListModelEvaluationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_from_dict(): + await test_list_model_evaluations_async(request_type=dict) + + +def test_list_model_evaluations_field_headers(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelEvaluationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + call.return_value = service.ListModelEvaluationsResponse() + client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_evaluations_field_headers_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = service.ListModelEvaluationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse()) + await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent_value',
+ ) in kw['metadata']
+
+
+def test_list_model_evaluations_flattened():
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_evaluations),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = service.ListModelEvaluationsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_model_evaluations(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = 'parent_value'
+ assert arg == mock_val
+
+
+def test_list_model_evaluations_flattened_error():
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_model_evaluations(
+ service.ListModelEvaluationsRequest(),
+ parent='parent_value',
+ )
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_flattened_async():
+ client = AutoMlAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_evaluations),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = service.ListModelEvaluationsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_model_evaluations(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = 'parent_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_flattened_error_async():
+ client = AutoMlAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_model_evaluations(
+ service.ListModelEvaluationsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_model_evaluations_pager(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_evaluations),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ service.ListModelEvaluationsResponse(
+ model_evaluation=[
+ model_evaluation.ModelEvaluation(),
+ model_evaluation.ModelEvaluation(),
+ model_evaluation.ModelEvaluation(),
+ ],
+ next_page_token='abc',
+ ),
+ service.ListModelEvaluationsResponse(
+ model_evaluation=[],
+ next_page_token='def',
+ ),
+ service.ListModelEvaluationsResponse(
+ model_evaluation=[
+ model_evaluation.ModelEvaluation(),
+ ],
+ next_page_token='ghi',
+ ),
+ service.ListModelEvaluationsResponse(
+ model_evaluation=[
+ model_evaluation.ModelEvaluation(),
+ model_evaluation.ModelEvaluation(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_model_evaluations(request={})
+
+ assert pager._metadata == metadata
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, model_evaluation.ModelEvaluation)
+ for i in results)
+def test_list_model_evaluations_pages(transport_name: str = "grpc"):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_evaluations),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ service.ListModelEvaluationsResponse(
+ model_evaluation=[
+ model_evaluation.ModelEvaluation(),
+ model_evaluation.ModelEvaluation(),
+ model_evaluation.ModelEvaluation(),
+ ],
+ next_page_token='abc',
+ ),
+ service.ListModelEvaluationsResponse(
+ model_evaluation=[],
+ next_page_token='def',
+ ),
+ service.ListModelEvaluationsResponse(
+ model_evaluation=[
+ model_evaluation.ModelEvaluation(),
+ ],
+ next_page_token='ghi',
+ ),
+ service.ListModelEvaluationsResponse(
+ model_evaluation=[
+ model_evaluation.ModelEvaluation(),
+ model_evaluation.ModelEvaluation(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_model_evaluations(request={}).pages)
+ for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async_pager():
+ client = AutoMlAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_model_evaluations),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[], + next_page_token='def', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_evaluations(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_pages(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[], + next_page_token='def', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_model_evaluations(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + service.CreateDatasetRequest, + dict, +]) +def test_create_dataset_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request_init["dataset"] = {'translation_dataset_metadata': {'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_dataset_metadata': {'classification_type': 1}, 'text_classification_dataset_metadata': {'classification_type': 1}, 'image_object_detection_dataset_metadata': {}, 'video_classification_dataset_metadata': {}, 'video_object_tracking_dataset_metadata': {}, 'text_extraction_dataset_metadata': {}, 'text_sentiment_dataset_metadata': {'sentiment_max': 1404}, 'tables_dataset_metadata': {'primary_table_spec_id': 'primary_table_spec_id_value', 'target_column_spec_id': 'target_column_spec_id_value', 'weight_column_spec_id': 'weight_column_spec_id_value', 'ml_use_column_spec_id': 
'ml_use_column_spec_id_value', 'target_column_correlations': {}, 'stats_update_time': {'seconds': 751, 'nanos': 543}}, 'name': 'name_value', 'display_name': 'display_name_value', 'description': 'description_value', 'example_count': 1396, 'create_time': {}, 'etag': 'etag_value'} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.CreateDatasetRequest.meta.fields["dataset"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["dataset"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["dataset"][field])): + del request_init["dataset"][field][i][subfield] + else: + del request_init["dataset"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_dataset(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +def test_create_dataset_rest_required_fields(request_type=service.CreateDatasetRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gca_dataset.Dataset() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "post",
+ 'query_params': pb_request,
+ }
+ transcode_result['body'] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gca_dataset.Dataset.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.create_dataset(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_create_dataset_rest_unset_required_fields():
+ transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.create_dataset._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("parent", "dataset", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_dataset_rest_interceptors(null_interceptor):
+ transport = transports.AutoMlRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(),
+ )
+ client = AutoMlClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.AutoMlRestInterceptor, "post_create_dataset") as post, \
+ mock.patch.object(transports.AutoMlRestInterceptor, "pre_create_dataset") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = service.CreateDatasetRequest.pb(service.CreateDatasetRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = gca_dataset.Dataset.to_json(gca_dataset.Dataset())
+
+ request = service.CreateDatasetRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gca_dataset.Dataset()
+
+ client.create_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_create_dataset_rest_bad_request(transport: str = 'rest', request_type=service.CreateDatasetRequest):
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'parent': 'projects/sample1/locations/sample2'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ client.create_dataset(request)
+
+
+def test_create_dataset_rest_flattened():
+ client = AutoMlClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_dataset.Dataset() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_dataset(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*}/datasets" % client.transport._host, args[1]) + + +def test_create_dataset_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_dataset( + service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + +def test_create_dataset_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetDatasetRequest, + dict, +]) +def test_get_dataset_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_dataset(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +def test_get_dataset_rest_required_fields(request_type=service.GetDatasetRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = dataset.Dataset() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
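+ # The request above holds only default-valued fields, so nothing survives
+ # serialization into query_params and the transport sends just the implicit
+ # '$alt' parameter, as asserted at the end of this test.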
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_dataset(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_dataset_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_dataset._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_dataset_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_get_dataset") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_dataset") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetDatasetRequest.pb(service.GetDatasetRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = dataset.Dataset.to_json(dataset.Dataset()) + + request = service.GetDatasetRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = dataset.Dataset() + + client.get_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_dataset_rest_bad_request(transport: str = 'rest', request_type=service.GetDatasetRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_dataset(request) + + +def test_get_dataset_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
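+ # (An empty Dataset suffices here: this flattened test only checks request
+ # routing, not the parsed response fields.)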
+ return_value = dataset.Dataset() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_dataset(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) + + +def test_get_dataset_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_dataset( + service.GetDatasetRequest(), + name='name_value', + ) + + +def test_get_dataset_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListDatasetsRequest, + dict, +]) +def test_list_datasets_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListDatasetsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListDatasetsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_datasets(request) + + # Establish that the response is the type that we expect. 
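+ # list_datasets wraps the raw ListDatasetsResponse in a ListDatasetsPager, so
+ # the assertions below check the pager type and the propagated page token.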
+ assert isinstance(response, pagers.ListDatasetsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_datasets_rest_required_fields(request_type=service.ListDatasetsRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_datasets._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_datasets._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListDatasetsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListDatasetsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_datasets(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_list_datasets_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.list_datasets._get_unset_required_fields({}) + assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_datasets_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_list_datasets") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_datasets") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListDatasetsRequest.pb(service.ListDatasetsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListDatasetsResponse.to_json(service.ListDatasetsResponse()) + + request = service.ListDatasetsRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListDatasetsResponse() + + client.list_datasets(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_datasets_rest_bad_request(transport: str = 'rest', request_type=service.ListDatasetsRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_datasets(request) + + +def test_list_datasets_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListDatasetsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListDatasetsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_datasets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*}/datasets" % client.transport._host, args[1]) + + +def test_list_datasets_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_datasets( + service.ListDatasetsRequest(), + parent='parent_value', + ) + + +def test_list_datasets_rest_pager(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListDatasetsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + pager = client.list_datasets(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, dataset.Dataset) + for i in results) + + pages = list(client.list_datasets(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + service.UpdateDatasetRequest, + dict, +]) +def test_update_dataset_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} + request_init["dataset"] = {'translation_dataset_metadata': {'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_dataset_metadata': {'classification_type': 1}, 'text_classification_dataset_metadata': {'classification_type': 1}, 'image_object_detection_dataset_metadata': {}, 'video_classification_dataset_metadata': {}, 'video_object_tracking_dataset_metadata': {}, 'text_extraction_dataset_metadata': {}, 'text_sentiment_dataset_metadata': {'sentiment_max': 1404}, 'tables_dataset_metadata': {'primary_table_spec_id': 'primary_table_spec_id_value', 'target_column_spec_id': 'target_column_spec_id_value', 'weight_column_spec_id': 'weight_column_spec_id_value', 'ml_use_column_spec_id': 'ml_use_column_spec_id_value', 'target_column_correlations': {}, 'stats_update_time': {'seconds': 751, 'nanos': 543}}, 'name': 'projects/sample1/locations/sample2/datasets/sample3', 'display_name': 'display_name_value', 'description': 'description_value', 'example_count': 1396, 'create_time': {}, 'etag': 'etag_value'} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.UpdateDatasetRequest.meta.fields["dataset"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
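+ # (proto-plus message classes are detected below by the absence of a
+ # `DESCRIPTOR` attribute; generated `*_pb2` classes expose one directly.)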
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["dataset"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["dataset"][field])): + del request_init["dataset"][field][i][subfield] + else: + del request_init["dataset"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + example_count=1396, + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_dataset(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.example_count == 1396 + assert response.etag == 'etag_value' + + +def test_update_dataset_rest_required_fields(request_type=service.UpdateDatasetRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_dataset._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gca_dataset.Dataset() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "patch", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gca_dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.update_dataset(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_update_dataset_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.update_dataset._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask", )) & set(("dataset", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_dataset_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_update_dataset") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_update_dataset") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.UpdateDatasetRequest.pb(service.UpdateDatasetRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gca_dataset.Dataset.to_json(gca_dataset.Dataset()) + + request = service.UpdateDatasetRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gca_dataset.Dataset() + + client.update_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_dataset_rest_bad_request(transport: str = 'rest', request_type=service.UpdateDatasetRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_dataset(request) + + +def test_update_dataset_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_dataset.Dataset() + + # get arguments that satisfy an http rule for this method + sample_request = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} + + # get truthy value for each flattened field + mock_args = dict( + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_dataset.Dataset.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_dataset(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) + + +def test_update_dataset_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_dataset( + service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), + ) + + +def test_update_dataset_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.DeleteDatasetRequest, + dict, +]) +def test_delete_dataset_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_dataset(request) + + # Establish that the response is the type that we expect. 
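+ # delete_dataset is a long-running operation, so the mocked payload is an
+ # Operation proto and the test checks the returned operation name rather than
+ # a Dataset message.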
+ assert response.operation.name == "operations/spam" + + +def test_delete_dataset_rest_required_fields(request_type=service.DeleteDatasetRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_dataset._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "delete", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.delete_dataset(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_delete_dataset_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.delete_dataset._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_dataset_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_delete_dataset") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_delete_dataset") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.DeleteDatasetRequest.pb(service.DeleteDatasetRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.DeleteDatasetRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_dataset_rest_bad_request(transport: str = 'rest', request_type=service.DeleteDatasetRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_dataset(request) + + +def test_delete_dataset_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_dataset(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) + + +def test_delete_dataset_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_dataset( + service.DeleteDatasetRequest(), + name='name_value', + ) + + +def test_delete_dataset_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ImportDataRequest, + dict, +]) +def test_import_data_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.import_data(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_import_data_rest_required_fields(request_type=service.ImportDataRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).import_data._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).import_data._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.import_data(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_import_data_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.import_data._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "inputConfig", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_import_data_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_import_data") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_import_data") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ImportDataRequest.pb(service.ImportDataRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.ImportDataRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.import_data(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_import_data_rest_bad_request(transport: str = 'rest', request_type=service.ImportDataRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.import_data(request) + + +def test_import_data_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
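+ # Here the flattened arguments include a nested message (io.InputConfig with a
+ # GcsSource), exercising message-typed flattened fields over REST; the final
+ # assertion checks the ':importData' custom-verb URI.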
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.import_data(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*}:importData" % client.transport._host, args[1]) + + +def test_import_data_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_data( + service.ImportDataRequest(), + name='name_value', + input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + ) + + +def test_import_data_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ExportDataRequest, + dict, +]) +def test_export_data_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.export_data(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_export_data_rest_required_fields(request_type=service.ExportDataRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_data._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_data._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.export_data(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_export_data_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.export_data._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "outputConfig", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_export_data_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_export_data") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_export_data") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ExportDataRequest.pb(service.ExportDataRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.ExportDataRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.export_data(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_export_data_rest_bad_request(transport: str = 'rest', request_type=service.ExportDataRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.export_data(request) + + +def test_export_data_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
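+ # Mirrors the import_data flattened test above, but with an io.OutputConfig /
+ # GcsDestination argument and the ':exportData' custom-verb URI.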
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.export_data(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*}:exportData" % client.transport._host, args[1]) + + +def test_export_data_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_data( + service.ExportDataRequest(), + name='name_value', + output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + +def test_export_data_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetAnnotationSpecRequest, + dict, +]) +def test_get_annotation_spec_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = annotation_spec.AnnotationSpec( + name='name_value', + display_name='display_name_value', + example_count=1396, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = annotation_spec.AnnotationSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_annotation_spec(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, annotation_spec.AnnotationSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.example_count == 1396 + + +def test_get_annotation_spec_rest_required_fields(request_type=service.GetAnnotationSpecRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_annotation_spec._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_annotation_spec._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = annotation_spec.AnnotationSpec() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = annotation_spec.AnnotationSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_annotation_spec(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_annotation_spec_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_annotation_spec._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_annotation_spec_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_get_annotation_spec") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_annotation_spec") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetAnnotationSpecRequest.pb(service.GetAnnotationSpecRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = annotation_spec.AnnotationSpec.to_json(annotation_spec.AnnotationSpec()) + + request = service.GetAnnotationSpecRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = annotation_spec.AnnotationSpec() + + client.get_annotation_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_annotation_spec_rest_bad_request(transport: str = 'rest', request_type=service.GetAnnotationSpecRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
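+    # The mocked session returns a 400 status, which the client surfaces as
+    # core_exceptions.BadRequest.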
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_annotation_spec(request) + + +def test_get_annotation_spec_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = annotation_spec.AnnotationSpec() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = annotation_spec.AnnotationSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_annotation_spec(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}" % client.transport._host, args[1]) + + +def test_get_annotation_spec_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_annotation_spec( + service.GetAnnotationSpecRequest(), + name='name_value', + ) + + +def test_get_annotation_spec_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetTableSpecRequest, + dict, +]) +def test_get_table_spec_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = table_spec.TableSpec( + name='name_value', + time_column_spec_id='time_column_spec_id_value', + row_count=992, + valid_row_count=1615, + column_count=1302, + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table_spec.TableSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_table_spec(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table_spec.TableSpec) + assert response.name == 'name_value' + assert response.time_column_spec_id == 'time_column_spec_id_value' + assert response.row_count == 992 + assert response.valid_row_count == 1615 + assert response.column_count == 1302 + assert response.etag == 'etag_value' + + +def test_get_table_spec_rest_required_fields(request_type=service.GetTableSpecRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_table_spec._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_table_spec._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("field_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table_spec.TableSpec() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table_spec.TableSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_table_spec(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_table_spec_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_table_spec._get_unset_required_fields({}) + assert set(unset_fields) == (set(("fieldMask", )) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_table_spec_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_get_table_spec") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_table_spec") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetTableSpecRequest.pb(service.GetTableSpecRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = table_spec.TableSpec.to_json(table_spec.TableSpec()) + + request = service.GetTableSpecRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table_spec.TableSpec() + + client.get_table_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_table_spec_rest_bad_request(transport: str = 'rest', request_type=service.GetTableSpecRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_table_spec(request) + + +def test_get_table_spec_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = table_spec.TableSpec() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table_spec.TableSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_table_spec(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}" % client.transport._host, args[1]) + + +def test_get_table_spec_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_table_spec( + service.GetTableSpecRequest(), + name='name_value', + ) + + +def test_get_table_spec_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListTableSpecsRequest, + dict, +]) +def test_list_table_specs_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListTableSpecsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListTableSpecsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_table_specs(request) + + # Establish that the response is the type that we expect. 
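+    # List methods wrap the REST response in a pager, so the type check below is
+    # against pagers.ListTableSpecsPager rather than the raw ListTableSpecsResponse.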
+ assert isinstance(response, pagers.ListTableSpecsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_table_specs_rest_required_fields(request_type=service.ListTableSpecsRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_table_specs._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_table_specs._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("field_mask", "filter", "page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListTableSpecsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListTableSpecsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_table_specs(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_list_table_specs_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.list_table_specs._get_unset_required_fields({}) + assert set(unset_fields) == (set(("fieldMask", "filter", "pageSize", "pageToken", )) & set(("parent", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_table_specs_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_list_table_specs") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_table_specs") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListTableSpecsRequest.pb(service.ListTableSpecsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListTableSpecsResponse.to_json(service.ListTableSpecsResponse()) + + request = service.ListTableSpecsRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListTableSpecsResponse() + + client.list_table_specs(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_table_specs_rest_bad_request(transport: str = 'rest', request_type=service.ListTableSpecsRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/datasets/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_table_specs(request) + + +def test_list_table_specs_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListTableSpecsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2/datasets/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListTableSpecsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_table_specs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs" % client.transport._host, args[1]) + + +def test_list_table_specs_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_table_specs( + service.ListTableSpecsRequest(), + parent='parent_value', + ) + + +def test_list_table_specs_rest_pager(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + table_spec.TableSpec(), + table_spec.TableSpec(), + ], + next_page_token='abc', + ), + service.ListTableSpecsResponse( + table_specs=[], + next_page_token='def', + ), + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + ], + next_page_token='ghi', + ), + service.ListTableSpecsResponse( + table_specs=[ + table_spec.TableSpec(), + table_spec.TableSpec(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListTableSpecsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/locations/sample2/datasets/sample3'} + + pager = client.list_table_specs(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table_spec.TableSpec) + for i in results) + + pages = list(client.list_table_specs(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + service.UpdateTableSpecRequest, + dict, +]) +def test_update_table_spec_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'table_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'}} + request_init["table_spec"] = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4', 'time_column_spec_id': 'time_column_spec_id_value', 'row_count': 992, 'valid_row_count': 1615, 'column_count': 1302, 'input_configs': [{'gcs_source': {'input_uris': ['input_uris_value1', 'input_uris_value2']}, 'bigquery_source': {'input_uri': 'input_uri_value'}, 'params': {}}], 'etag': 'etag_value'} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.UpdateTableSpecRequest.meta.fields["table_spec"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
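+        # proto-plus message types expose their schema via `meta.fields`, while plain
+        # protobuf messages expose it via `DESCRIPTOR.fields`; both runtimes are
+        # handled so the subfield pruning below works either way.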
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["table_spec"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["table_spec"][field])): + del request_init["table_spec"][field][i][subfield] + else: + del request_init["table_spec"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_table_spec.TableSpec( + name='name_value', + time_column_spec_id='time_column_spec_id_value', + row_count=992, + valid_row_count=1615, + column_count=1302, + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_table_spec.TableSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_table_spec(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_table_spec.TableSpec) + assert response.name == 'name_value' + assert response.time_column_spec_id == 'time_column_spec_id_value' + assert response.row_count == 992 + assert response.valid_row_count == 1615 + assert response.column_count == 1302 + assert response.etag == 'etag_value' + + +def test_update_table_spec_rest_required_fields(request_type=service.UpdateTableSpecRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_table_spec._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_table_spec._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gca_table_spec.TableSpec() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "patch", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gca_table_spec.TableSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.update_table_spec(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_update_table_spec_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.update_table_spec._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask", )) & set(("tableSpec", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_table_spec_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_update_table_spec") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_update_table_spec") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.UpdateTableSpecRequest.pb(service.UpdateTableSpecRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gca_table_spec.TableSpec.to_json(gca_table_spec.TableSpec()) + + request = service.UpdateTableSpecRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gca_table_spec.TableSpec() + + client.update_table_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_table_spec_rest_bad_request(transport: str = 'rest', request_type=service.UpdateTableSpecRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'table_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_table_spec(request) + + +def test_update_table_spec_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_table_spec.TableSpec() + + # get arguments that satisfy an http rule for this method + sample_request = {'table_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'}} + + # get truthy value for each flattened field + mock_args = dict( + table_spec=gca_table_spec.TableSpec(name='name_value'), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_table_spec.TableSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_table_spec(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}" % client.transport._host, args[1]) + + +def test_update_table_spec_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_table_spec( + service.UpdateTableSpecRequest(), + table_spec=gca_table_spec.TableSpec(name='name_value'), + ) + + +def test_update_table_spec_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetColumnSpecRequest, + dict, +]) +def test_get_column_spec_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = column_spec.ColumnSpec( + name='name_value', + display_name='display_name_value', + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = column_spec.ColumnSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_column_spec(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, column_spec.ColumnSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + + +def test_get_column_spec_rest_required_fields(request_type=service.GetColumnSpecRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_column_spec._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_column_spec._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("field_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = column_spec.ColumnSpec() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = column_spec.ColumnSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_column_spec(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_column_spec_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_column_spec._get_unset_required_fields({}) + assert set(unset_fields) == (set(("fieldMask", )) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_column_spec_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_get_column_spec") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_column_spec") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetColumnSpecRequest.pb(service.GetColumnSpecRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = column_spec.ColumnSpec.to_json(column_spec.ColumnSpec()) + + request = service.GetColumnSpecRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = column_spec.ColumnSpec() + + client.get_column_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_column_spec_rest_bad_request(transport: str = 'rest', request_type=service.GetColumnSpecRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_column_spec(request) + + +def test_get_column_spec_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = column_spec.ColumnSpec() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = column_spec.ColumnSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_column_spec(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}" % client.transport._host, args[1]) + + +def test_get_column_spec_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_column_spec( + service.GetColumnSpecRequest(), + name='name_value', + ) + + +def test_get_column_spec_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListColumnSpecsRequest, + dict, +]) +def test_list_column_specs_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListColumnSpecsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListColumnSpecsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_column_specs(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListColumnSpecsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_column_specs_rest_required_fields(request_type=service.ListColumnSpecsRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_column_specs._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_column_specs._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("field_mask", "filter", "page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListColumnSpecsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListColumnSpecsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_column_specs(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_list_column_specs_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.list_column_specs._get_unset_required_fields({}) + assert set(unset_fields) == (set(("fieldMask", "filter", "pageSize", "pageToken", )) & set(("parent", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_column_specs_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_list_column_specs") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_column_specs") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListColumnSpecsRequest.pb(service.ListColumnSpecsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListColumnSpecsResponse.to_json(service.ListColumnSpecsResponse()) + + request = service.ListColumnSpecsRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListColumnSpecsResponse() + + client.list_column_specs(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_column_specs_rest_bad_request(transport: str = 'rest', request_type=service.ListColumnSpecsRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_column_specs(request) + + +def test_list_column_specs_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListColumnSpecsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListColumnSpecsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_column_specs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs" % client.transport._host, args[1]) + + +def test_list_column_specs_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_column_specs( + service.ListColumnSpecsRequest(), + parent='parent_value', + ) + + +def test_list_column_specs_rest_pager(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + ], + next_page_token='abc', + ), + service.ListColumnSpecsResponse( + column_specs=[], + next_page_token='def', + ), + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + ], + next_page_token='ghi', + ), + service.ListColumnSpecsResponse( + column_specs=[ + column_spec.ColumnSpec(), + column_spec.ColumnSpec(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListColumnSpecsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} + + pager = client.list_column_specs(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, column_spec.ColumnSpec) + for i in results) + + pages = list(client.list_column_specs(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + service.UpdateColumnSpecRequest, + dict, +]) +def test_update_column_spec_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'column_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'}} + request_init["column_spec"] = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5', 'data_type': {'list_element_type': {}, 'struct_type': {'fields': {}}, 'time_format': 'time_format_value', 'type_code': 3, 'nullable': True}, 'display_name': 'display_name_value', 'data_stats': {'float64_stats': {'mean': 0.417, 'standard_deviation': 0.1907, 'quantiles': [0.983, 0.984], 'histogram_buckets': [{'min_': 0.419, 'max_': 0.421, 'count': 553}]}, 'string_stats': {'top_unigram_stats': [{'value': 'value_value', 'count': 553}]}, 'timestamp_stats': {'granular_stats': {}}, 'array_stats': {'member_stats': {}}, 'struct_stats': {'field_stats': {}}, 'category_stats': {'top_category_stats': [{'value': 'value_value', 'count': 553}]}, 'distinct_value_count': 2150, 'null_value_count': 1727, 'valid_value_count': 1812}, 'top_correlated_columns': [{'column_spec_id': 'column_spec_id_value', 'correlation_stats': {'cramers_v': 0.962}}], 'etag': 'etag_value'} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.UpdateColumnSpecRequest.meta.fields["column_spec"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["column_spec"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["column_spec"][field])): + del request_init["column_spec"][field][i][subfield] + else: + del request_init["column_spec"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_column_spec.ColumnSpec( + name='name_value', + display_name='display_name_value', + etag='etag_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_column_spec.ColumnSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_column_spec(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_column_spec.ColumnSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + + +def test_update_column_spec_rest_required_fields(request_type=service.UpdateColumnSpecRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_column_spec._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_column_spec._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gca_column_spec.ColumnSpec() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
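+ # The stubbed transcode() result mirrors the real helper's return shape:
+ # a dict with 'uri', 'method' and 'query_params', plus a 'body' entry for
+ # HTTP methods that carry a request payload (PATCH here).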
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "patch", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gca_column_spec.ColumnSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.update_column_spec(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_update_column_spec_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.update_column_spec._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask", )) & set(("columnSpec", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_column_spec_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_update_column_spec") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_update_column_spec") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.UpdateColumnSpecRequest.pb(service.UpdateColumnSpecRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gca_column_spec.ColumnSpec.to_json(gca_column_spec.ColumnSpec()) + + request = service.UpdateColumnSpecRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gca_column_spec.ColumnSpec() + + client.update_column_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_column_spec_rest_bad_request(transport: str = 'rest', request_type=service.UpdateColumnSpecRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'column_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
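+ # A canned 400 response from the mocked session should surface to the
+ # caller as core_exceptions.BadRequest.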
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_column_spec(request) + + +def test_update_column_spec_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gca_column_spec.ColumnSpec() + + # get arguments that satisfy an http rule for this method + sample_request = {'column_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'}} + + # get truthy value for each flattened field + mock_args = dict( + column_spec=gca_column_spec.ColumnSpec(name='name_value'), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_column_spec.ColumnSpec.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_column_spec(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}" % client.transport._host, args[1]) + + +def test_update_column_spec_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_column_spec( + service.UpdateColumnSpecRequest(), + column_spec=gca_column_spec.ColumnSpec(name='name_value'), + ) + + +def test_update_column_spec_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.CreateModelRequest, + dict, +]) +def test_create_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request_init["model"] = {'translation_model_metadata': {'base_model': 'base_model_value', 'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_model_metadata': {'base_model_id': 'base_model_id_value', 'train_budget': 1272, 'train_cost': 1078, 'stop_reason': 'stop_reason_value', 'model_type': 'model_type_value', 'node_qps': 0.857, 'node_count': 1070}, 'text_classification_model_metadata': {'classification_type': 1}, 'image_object_detection_model_metadata': {'model_type': 'model_type_value', 'node_count': 1070, 'node_qps': 0.857, 'stop_reason': 'stop_reason_value', 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881}, 'video_classification_model_metadata': {}, 'video_object_tracking_model_metadata': {}, 'text_extraction_model_metadata': {'model_hint': 'model_hint_value'}, 'tables_model_metadata': {'optimization_objective_recall_value': 0.37270000000000003, 'optimization_objective_precision_value': 0.4072, 'target_column_spec': {'name': 'name_value', 'data_type': {'list_element_type': {}, 'struct_type': {'fields': {}}, 'time_format': 'time_format_value', 'type_code': 3, 'nullable': True}, 'display_name': 'display_name_value', 'data_stats': {'float64_stats': {'mean': 0.417, 'standard_deviation': 0.1907, 'quantiles': [0.983, 0.984], 'histogram_buckets': [{'min_': 0.419, 'max_': 0.421, 'count': 553}]}, 'string_stats': {'top_unigram_stats': [{'value': 'value_value', 'count': 553}]}, 'timestamp_stats': {'granular_stats': {}}, 'array_stats': {'member_stats': {}}, 'struct_stats': {'field_stats': {}}, 'category_stats': {'top_category_stats': [{'value': 'value_value', 'count': 553}]}, 'distinct_value_count': 2150, 'null_value_count': 1727, 'valid_value_count': 1812}, 'top_correlated_columns': [{'column_spec_id': 'column_spec_id_value', 'correlation_stats': {'cramers_v': 0.962}}], 'etag': 'etag_value'}, 'input_feature_column_specs': {}, 'optimization_objective': 'optimization_objective_value', 'tables_model_column_info': [{'column_spec_name': 'column_spec_name_value', 'column_display_name': 'column_display_name_value', 'feature_importance': 0.1917}], 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881, 'disable_early_stopping': True}, 'text_sentiment_model_metadata': {}, 'name': 'name_value', 'display_name': 'display_name_value', 'dataset_id': 'dataset_id_value', 'create_time': {'seconds': 751, 'nanos': 543}, 'update_time': {}, 'deployment_state': 1} + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = service.CreateModelRequest.meta.fields["model"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["model"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["model"][field])): + del request_init["model"][field][i][subfield] + else: + del request_init["model"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_model(request) + + # Establish that the response is the type that we expect. 
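+ # CreateModel is a long-running operation, so the REST stub returns a raw
+ # operations_pb2.Operation; only the wrapped operation name is checked here.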
+ assert response.operation.name == "operations/spam" + + +def test_create_model_rest_required_fields(request_type=service.CreateModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.create_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_create_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.create_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("parent", "model", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_create_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_create_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.CreateModelRequest.pb(service.CreateModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.CreateModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_model_rest_bad_request(transport: str = 'rest', request_type=service.CreateModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_model(request) + + +def test_create_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
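+ # Flattened-call tests verify that keyword arguments are folded into the
+ # request and that the resulting URL matches the v1beta1 HTTP rule (see the
+ # path_template.validate check below).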
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*}/models" % client.transport._host, args[1]) + + +def test_create_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model( + service.CreateModelRequest(), + parent='parent_value', + model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), + ) + + +def test_create_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetModelRequest, + dict, +]) +def test_get_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = model.Model( + name='name_value', + display_name='display_name_value', + dataset_id='dataset_id_value', + deployment_state=model.Model.DeploymentState.DEPLOYED, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_model(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.dataset_id == 'dataset_id_value' + assert response.deployment_state == model.Model.DeploymentState.DEPLOYED + + +def test_get_model_rest_required_fields(request_type=service.GetModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model.Model() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
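+ # GetModel maps to HTTP GET, so the stubbed transcode result below has no
+ # 'body' entry and every request field travels in query_params.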
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_get_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetModelRequest.pb(service.GetModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model.Model.to_json(model.Model()) + + request = service.GetModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model.Model() + + client.get_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_model_rest_bad_request(transport: str = 'rest', request_type=service.GetModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_model(request) + + +def test_get_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = model.Model() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model.Model.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}" % client.transport._host, args[1]) + + +def test_get_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + service.GetModelRequest(), + name='name_value', + ) + + +def test_get_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListModelsRequest, + dict, +]) +def test_list_models_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListModelsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_models(request) + + # Establish that the response is the type that we expect. 
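+ # Even over REST, list_models wraps the raw ListModelsResponse in a
+ # ListModelsPager so callers can iterate transparently across pages.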
+ assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_models_rest_required_fields(request_type=service.ListModelsRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_models._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_models._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListModelsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_models(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_list_models_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.list_models._get_unset_required_fields({}) + assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_models_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_list_models") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_models") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListModelsRequest.pb(service.ListModelsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListModelsResponse.to_json(service.ListModelsResponse()) + + request = service.ListModelsRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListModelsResponse() + + client.list_models(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_models_rest_bad_request(transport: str = 'rest', request_type=service.ListModelsRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_models(request) + + +def test_list_models_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListModelsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListModelsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_models(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*}/models" % client.transport._host, args[1]) + + +def test_list_models_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_models( + service.ListModelsRequest(), + parent='parent_value', + ) + + +def test_list_models_rest_pager(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
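+ # Same pagination pattern as the column-spec pager test above: four canned
+ # pages holding six models in total, with page tokens 'abc', 'def', 'ghi'
+ # and finally ''.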
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + service.ListModelsResponse( + model=[], + next_page_token='def', + ), + service.ListModelsResponse( + model=[ + model.Model(), + ], + next_page_token='ghi', + ), + service.ListModelsResponse( + model=[ + model.Model(), + model.Model(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListModelsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + pager = client.list_models(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) + for i in results) + + pages = list(client.list_models(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + service.DeleteModelRequest, + dict, +]) +def test_delete_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_model(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_delete_model_rest_required_fields(request_type=service.DeleteModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
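+ # DeleteModel uses HTTP DELETE; like GET, the stubbed transcode result
+ # below carries no 'body' entry.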
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "delete", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.delete_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_delete_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.delete_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_delete_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_delete_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.DeleteModelRequest.pb(service.DeleteModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.DeleteModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_model_rest_bad_request(transport: str = 'rest', request_type=service.DeleteModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_model(request) + + +def test_delete_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}" % client.transport._host, args[1]) + + +def test_delete_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model( + service.DeleteModelRequest(), + name='name_value', + ) + + +def test_delete_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.DeployModelRequest, + dict, +]) +def test_deploy_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.deploy_model(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_deploy_model_rest_required_fields(request_type=service.DeployModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).deploy_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).deploy_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.deploy_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_deploy_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.deploy_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_deploy_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_deploy_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_deploy_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.DeployModelRequest.pb(service.DeployModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.DeployModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.deploy_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_deploy_model_rest_bad_request(transport: str = 'rest', request_type=service.DeployModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.deploy_model(request) + + +def test_deploy_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.deploy_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:deploy" % client.transport._host, args[1]) + + +def test_deploy_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.deploy_model( + service.DeployModelRequest(), + name='name_value', + ) + + +def test_deploy_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.UndeployModelRequest, + dict, +]) +def test_undeploy_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.undeploy_model(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_undeploy_model_rest_required_fields(request_type=service.UndeployModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).undeploy_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).undeploy_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.undeploy_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_undeploy_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.undeploy_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_undeploy_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_undeploy_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_undeploy_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.UndeployModelRequest.pb(service.UndeployModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.UndeployModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.undeploy_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_undeploy_model_rest_bad_request(transport: str = 'rest', request_type=service.UndeployModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.undeploy_model(request) + + +def test_undeploy_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.undeploy_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:undeploy" % client.transport._host, args[1]) + + +def test_undeploy_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undeploy_model( + service.UndeployModelRequest(), + name='name_value', + ) + + +def test_undeploy_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ExportModelRequest, + dict, +]) +def test_export_model_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.export_model(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_export_model_rest_required_fields(request_type=service.ExportModelRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_model._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.export_model(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_export_model_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.export_model._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "outputConfig", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_export_model_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_export_model") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_export_model") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ExportModelRequest.pb(service.ExportModelRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.ExportModelRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.export_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_export_model_rest_bad_request(transport: str = 'rest', request_type=service.ExportModelRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.export_model(request) + + +def test_export_model_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.export_model(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:export" % client.transport._host, args[1]) + + +def test_export_model_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_model( + service.ExportModelRequest(), + name='name_value', + output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + +def test_export_model_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ExportEvaluatedExamplesRequest, + dict, +]) +def test_export_evaluated_examples_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.export_evaluated_examples(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_export_evaluated_examples_rest_required_fields(request_type=service.ExportEvaluatedExamplesRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_evaluated_examples._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_evaluated_examples._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.export_evaluated_examples(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_export_evaluated_examples_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.export_evaluated_examples._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "outputConfig", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_export_evaluated_examples_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_export_evaluated_examples") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_export_evaluated_examples") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ExportEvaluatedExamplesRequest.pb(service.ExportEvaluatedExamplesRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = service.ExportEvaluatedExamplesRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.export_evaluated_examples(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_export_evaluated_examples_rest_bad_request(transport: str = 'rest', request_type=service.ExportEvaluatedExamplesRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.export_evaluated_examples(request) + + +def test_export_evaluated_examples_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.export_evaluated_examples(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples" % client.transport._host, args[1]) + + +def test_export_evaluated_examples_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_evaluated_examples( + service.ExportEvaluatedExamplesRequest(), + name='name_value', + output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), + ) + + +def test_export_evaluated_examples_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.GetModelEvaluationRequest, + dict, +]) +def test_get_model_evaluation_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = model_evaluation.ModelEvaluation( + name='name_value', + annotation_spec_id='annotation_spec_id_value', + display_name='display_name_value', + evaluated_example_count=2446, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_evaluation.ModelEvaluation.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_model_evaluation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.annotation_spec_id == 'annotation_spec_id_value' + assert response.display_name == 'display_name_value' + assert response.evaluated_example_count == 2446 + + +def test_get_model_evaluation_rest_required_fields(request_type=service.GetModelEvaluationRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model_evaluation._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model_evaluation._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model_evaluation.ModelEvaluation() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = model_evaluation.ModelEvaluation.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_model_evaluation(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_model_evaluation_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_model_evaluation._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_model_evaluation_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_get_model_evaluation") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_model_evaluation") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.GetModelEvaluationRequest.pb(service.GetModelEvaluationRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model_evaluation.ModelEvaluation.to_json(model_evaluation.ModelEvaluation()) + + request = service.GetModelEvaluationRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model_evaluation.ModelEvaluation() + + client.get_model_evaluation(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_model_evaluation_rest_bad_request(transport: str = 'rest', request_type=service.GetModelEvaluationRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_model_evaluation(request) + + +def test_get_model_evaluation_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = model_evaluation.ModelEvaluation() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_evaluation.ModelEvaluation.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_model_evaluation(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}" % client.transport._host, args[1]) + + +def test_get_model_evaluation_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_evaluation( + service.GetModelEvaluationRequest(), + name='name_value', + ) + + +def test_get_model_evaluation_rest_error(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + service.ListModelEvaluationsRequest, + dict, +]) +def test_list_model_evaluations_rest(request_type): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = service.ListModelEvaluationsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListModelEvaluationsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_model_evaluations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_evaluations_rest_required_fields(request_type=service.ListModelEvaluationsRequest): + transport_class = transports.AutoMlRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_model_evaluations._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_model_evaluations._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = service.ListModelEvaluationsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = service.ListModelEvaluationsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_model_evaluations(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_list_model_evaluations_rest_unset_required_fields(): + transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.list_model_evaluations._get_unset_required_fields({}) + assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_model_evaluations_rest_interceptors(null_interceptor): + transport = transports.AutoMlRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), + ) + client = AutoMlClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.AutoMlRestInterceptor, "post_list_model_evaluations") as post, \ + mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_model_evaluations") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = service.ListModelEvaluationsRequest.pb(service.ListModelEvaluationsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = service.ListModelEvaluationsResponse.to_json(service.ListModelEvaluationsResponse()) + + request = service.ListModelEvaluationsRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = service.ListModelEvaluationsResponse() + + client.list_model_evaluations(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_model_evaluations_rest_bad_request(transport: str = 'rest', request_type=service.ListModelEvaluationsRequest): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_model_evaluations(request) + + +def test_list_model_evaluations_rest_flattened(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = service.ListModelEvaluationsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = service.ListModelEvaluationsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_model_evaluations(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations" % client.transport._host, args[1]) + + +def test_list_model_evaluations_rest_flattened_error(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_evaluations( + service.ListModelEvaluationsRequest(), + parent='parent_value', + ) + + +def test_list_model_evaluations_rest_pager(transport: str = 'rest'): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[], + next_page_token='def', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + service.ListModelEvaluationsResponse( + model_evaluation=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(service.ListModelEvaluationsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/locations/sample2/models/sample3'} + + pager = client.list_model_evaluations(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) + for i in results) + + pages = list(client.list_model_evaluations(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoMlClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = AutoMlClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = AutoMlClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoMlClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = AutoMlClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoMlGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.AutoMlGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.AutoMlGrpcTransport, + transports.AutoMlGrpcAsyncIOTransport, + transports.AutoMlRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "rest", +]) +def test_transport_kind(transport_name): + transport = AutoMlClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.AutoMlGrpcTransport, + ) + +def test_auto_ml_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.AutoMlTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_auto_ml_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.AutoMlTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_dataset', + 'get_dataset', + 'list_datasets', + 'update_dataset', + 'delete_dataset', + 'import_data', + 'export_data', + 'get_annotation_spec', + 'get_table_spec', + 'list_table_specs', + 'update_table_spec', + 'get_column_spec', + 'list_column_specs', + 'update_column_spec', + 'create_model', + 'get_model', + 'list_models', + 'delete_model', + 'deploy_model', + 'undeploy_model', + 'export_model', + 'export_evaluated_examples', + 'get_model_evaluation', + 'list_model_evaluations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_auto_ml_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoMlTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_auto_ml_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoMlTransport() + adc.assert_called_once() + + +def test_auto_ml_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + AutoMlClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AutoMlGrpcTransport, + transports.AutoMlGrpcAsyncIOTransport, + ], +) +def test_auto_ml_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.AutoMlGrpcTransport, + transports.AutoMlGrpcAsyncIOTransport, + transports.AutoMlRestTransport, + ], +) +def test_auto_ml_transport_auth_gdch_credentials(transport_class): + host = 'https://language.com' + api_audience_tests = [None, 'https://language2.com'] + api_audience_expect = [host, 'https://language2.com'] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with( + e + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.AutoMlGrpcTransport, grpc_helpers), + (transports.AutoMlGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_auto_ml_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "automl.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="automl.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) +def test_auto_ml_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + +def test_auto_ml_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.AutoMlRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_auto_ml_rest_lro_client(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", + "rest", +]) +def test_auto_ml_host_no_port(transport_name): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else 'https://automl.googleapis.com' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", + "rest", +]) +def test_auto_ml_host_with_port(transport_name): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'automl.googleapis.com:8000' + if transport_name in ['grpc', 'grpc_asyncio'] + else 'https://automl.googleapis.com:8000' + ) + +@pytest.mark.parametrize("transport_name", [ + "rest", +]) +def test_auto_ml_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = AutoMlClient( + credentials=creds1, + transport=transport_name, + ) + client2 = AutoMlClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_dataset._session + session2 = client2.transport.create_dataset._session + assert session1 != session2 + session1 = client1.transport.get_dataset._session + session2 = client2.transport.get_dataset._session + assert session1 != session2 + session1 = client1.transport.list_datasets._session + session2 = client2.transport.list_datasets._session + assert session1 != session2 + session1 = client1.transport.update_dataset._session + session2 = client2.transport.update_dataset._session + assert session1 != session2 + session1 = client1.transport.delete_dataset._session + session2 = client2.transport.delete_dataset._session + assert session1 != session2 + session1 = client1.transport.import_data._session + session2 = client2.transport.import_data._session + assert session1 != session2 + 
session1 = client1.transport.export_data._session + session2 = client2.transport.export_data._session + assert session1 != session2 + session1 = client1.transport.get_annotation_spec._session + session2 = client2.transport.get_annotation_spec._session + assert session1 != session2 + session1 = client1.transport.get_table_spec._session + session2 = client2.transport.get_table_spec._session + assert session1 != session2 + session1 = client1.transport.list_table_specs._session + session2 = client2.transport.list_table_specs._session + assert session1 != session2 + session1 = client1.transport.update_table_spec._session + session2 = client2.transport.update_table_spec._session + assert session1 != session2 + session1 = client1.transport.get_column_spec._session + session2 = client2.transport.get_column_spec._session + assert session1 != session2 + session1 = client1.transport.list_column_specs._session + session2 = client2.transport.list_column_specs._session + assert session1 != session2 + session1 = client1.transport.update_column_spec._session + session2 = client2.transport.update_column_spec._session + assert session1 != session2 + session1 = client1.transport.create_model._session + session2 = client2.transport.create_model._session + assert session1 != session2 + session1 = client1.transport.get_model._session + session2 = client2.transport.get_model._session + assert session1 != session2 + session1 = client1.transport.list_models._session + session2 = client2.transport.list_models._session + assert session1 != session2 + session1 = client1.transport.delete_model._session + session2 = client2.transport.delete_model._session + assert session1 != session2 + session1 = client1.transport.deploy_model._session + session2 = client2.transport.deploy_model._session + assert session1 != session2 + session1 = client1.transport.undeploy_model._session + session2 = client2.transport.undeploy_model._session + assert session1 != session2 + session1 = client1.transport.export_model._session + session2 = client2.transport.export_model._session + assert session1 != session2 + session1 = client1.transport.export_evaluated_examples._session + session2 = client2.transport.export_evaluated_examples._session + assert session1 != session2 + session1 = client1.transport.get_model_evaluation._session + session2 = client2.transport.get_model_evaluation._session + assert session1 != session2 + session1 = client1.transport.list_model_evaluations._session + session2 = client2.transport.list_model_evaluations._session + assert session1 != session2 +def test_auto_ml_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.AutoMlGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_auto_ml_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
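+    # An explicitly supplied channel should be adopted as-is rather than a new one being created.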
+ transport = transports.AutoMlGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) +def test_auto_ml_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) +def test_auto_ml_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_auto_ml_grpc_lro_client(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
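+    # The grpc transport exposes operations_v1.OperationsClient; the REST transport above exposes AbstractOperationsClient and the asyncio transport below exposes OperationsAsyncClient.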
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_auto_ml_grpc_lro_async_client(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_annotation_spec_path(): + project = "squid" + location = "clam" + dataset = "whelk" + annotation_spec = "octopus" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) + actual = AutoMlClient.annotation_spec_path(project, location, dataset, annotation_spec) + assert expected == actual + + +def test_parse_annotation_spec_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "dataset": "cuttlefish", + "annotation_spec": "mussel", + } + path = AutoMlClient.annotation_spec_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_annotation_spec_path(path) + assert expected == actual + +def test_column_spec_path(): + project = "winkle" + location = "nautilus" + dataset = "scallop" + table_spec = "abalone" + column_spec = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}".format(project=project, location=location, dataset=dataset, table_spec=table_spec, column_spec=column_spec, ) + actual = AutoMlClient.column_spec_path(project, location, dataset, table_spec, column_spec) + assert expected == actual + + +def test_parse_column_spec_path(): + expected = { + "project": "clam", + "location": "whelk", + "dataset": "octopus", + "table_spec": "oyster", + "column_spec": "nudibranch", + } + path = AutoMlClient.column_spec_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_column_spec_path(path) + assert expected == actual + +def test_dataset_path(): + project = "cuttlefish" + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = AutoMlClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", + } + path = AutoMlClient.dataset_path(**expected) + + # Check that the path construction is reversible. 
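+    # e.g. "projects/nautilus/locations/scallop/datasets/abalone" should parse back into the original components.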
+ actual = AutoMlClient.parse_dataset_path(path) + assert expected == actual + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = AutoMlClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = AutoMlClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_model_path(path) + assert expected == actual + +def test_model_evaluation_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + model_evaluation = "nautilus" + expected = "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(project=project, location=location, model=model, model_evaluation=model_evaluation, ) + actual = AutoMlClient.model_evaluation_path(project, location, model, model_evaluation) + assert expected == actual + + +def test_parse_model_evaluation_path(): + expected = { + "project": "scallop", + "location": "abalone", + "model": "squid", + "model_evaluation": "clam", + } + path = AutoMlClient.model_evaluation_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_model_evaluation_path(path) + assert expected == actual + +def test_table_spec_path(): + project = "whelk" + location = "octopus" + dataset = "oyster" + table_spec = "nudibranch" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}".format(project=project, location=location, dataset=dataset, table_spec=table_spec, ) + actual = AutoMlClient.table_spec_path(project, location, dataset, table_spec) + assert expected == actual + + +def test_parse_table_spec_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + "dataset": "winkle", + "table_spec": "nautilus", + } + path = AutoMlClient.table_spec_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_table_spec_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = AutoMlClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = AutoMlClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format(folder=folder, ) + actual = AutoMlClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = AutoMlClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AutoMlClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format(organization=organization, ) + actual = AutoMlClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = AutoMlClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format(project=project, ) + actual = AutoMlClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = AutoMlClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = AutoMlClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = AutoMlClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = AutoMlClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.AutoMlTransport, '_prep_wrapped_messages') as prep: + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.AutoMlTransport, '_prep_wrapped_messages') as prep: + transport_class = AutoMlClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = AutoMlAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + 'grpc', + ] + for transport in transports: + client = AutoMlClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
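+        # Exiting the client context manager should close the underlying transport.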
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (AutoMlClient, transports.AutoMlGrpcTransport), + (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_prediction_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_prediction_service.py new file mode 100644 index 00000000..55cc6d75 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_prediction_service.py @@ -0,0 +1,2270 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.automl_v1beta1.services.prediction_service import PredictionServiceAsyncClient +from google.cloud.automl_v1beta1.services.prediction_service import PredictionServiceClient +from google.cloud.automl_v1beta1.services.prediction_service import transports +from google.cloud.automl_v1beta1.types import annotation_payload +from google.cloud.automl_v1beta1.types import data_items +from google.cloud.automl_v1beta1.types import geometry +from google.cloud.automl_v1beta1.types import io +from google.cloud.automl_v1beta1.types import operations +from google.cloud.automl_v1beta1.types import prediction_service +from google.cloud.automl_v1beta1.types import text_segment +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import struct_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
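+# A localhost default endpoint is swapped for "foo.googleapis.com" so that a distinct mTLS endpoint can still be derived.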
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PredictionServiceClient._get_default_mtls_endpoint(None) is None + assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), + (PredictionServiceClient, "rest"), +]) +def test_prediction_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://automl.googleapis.com' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PredictionServiceGrpcTransport, "grpc"), + (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.PredictionServiceRestTransport, "rest"), +]) +def test_prediction_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), + (PredictionServiceClient, "rest"), +]) +def test_prediction_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert 
isinstance(client, client_class) + + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://automl.googleapis.com' + ) + + +def test_prediction_service_client_get_transport_class(): + transport = PredictionServiceClient.get_transport_class() + available_transports = [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceRestTransport, + ] + assert transport in available_transports + + transport = PredictionServiceClient.get_transport_class("grpc") + assert transport == transports.PredictionServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +def test_prediction_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
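+    # With "always", the client should target DEFAULT_MTLS_ENDPOINT even though no client certificate is configured.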
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions(api_audience="https://language.googleapis.com") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com" + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), + (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", "true"), + (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", "false"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [ + PredictionServiceClient, PredictionServiceAsyncClient +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +def test_prediction_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
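+    # When ClientOptions carries a client_cert_source, the supplied api_endpoint and cert source should be returned unchanged.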
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest"), +]) +def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
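+    # Scopes supplied through ClientOptions should be forwarded verbatim to the transport constructor.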
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), + (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", None), +]) +def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +def test_prediction_service_client_client_options_from_dict(): + with mock.patch('google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = PredictionServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_prediction_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
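+    # create_channel should receive the credentials loaded from the file rather than the ADC credentials.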
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "automl.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="automl.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.PredictRequest, + dict, +]) +def test_predict(request_type, transport: str = 'grpc'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse( + ) + response = client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + + +def test_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + client.predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + +@pytest.mark.asyncio +async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse( + )) + response = await client.predict(request) + + # Establish that the underlying gRPC stub method was called. 
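+    # For the async client only the presence of a call is checked; the recorded request should equal an empty PredictRequest.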
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + + +@pytest.mark.asyncio +async def test_predict_async_from_dict(): + await test_predict_async(request_type=dict) + + +def test_predict_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + call.return_value = prediction_service.PredictResponse() + client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) + await client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_predict_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.predict( + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].payload + mock_val = data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')) + assert arg == mock_val + arg = args[0].params + mock_val = {'key_value': 'value_value'} + assert arg == mock_val + + +def test_predict_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.predict( + prediction_service.PredictRequest(), + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + +@pytest.mark.asyncio +async def test_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.predict( + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].payload + mock_val = data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')) + assert arg == mock_val + arg = args[0].params + mock_val = {'key_value': 'value_value'} + assert arg == mock_val + +@pytest.mark.asyncio +async def test_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.predict( + prediction_service.PredictRequest(), + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.BatchPredictRequest, + dict, +]) +def test_batch_predict(request_type, transport: str = 'grpc'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. 
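+    # batch_predict is long-running: the stub returns an Operation and the client is expected to wrap it in a future.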
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.BatchPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + client.batch_predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.BatchPredictRequest() + +@pytest.mark.asyncio +async def test_batch_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.BatchPredictRequest): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.BatchPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_predict_async_from_dict(): + await test_batch_predict_async(request_type=dict) + + +def test_batch_predict_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.BatchPredictRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.BatchPredictRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.batch_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_batch_predict_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_predict( + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].input_config + mock_val = io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) + assert arg == mock_val + arg = args[0].output_config + mock_val = io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + arg = args[0].params + mock_val = {'key_value': 'value_value'} + assert arg == mock_val + + +def test_batch_predict_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_predict( + prediction_service.BatchPredictRequest(), + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + +@pytest.mark.asyncio +async def test_batch_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.batch_predict( + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].input_config + mock_val = io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) + assert arg == mock_val + arg = args[0].output_config + mock_val = io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + arg = args[0].params + mock_val = {'key_value': 'value_value'} + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_predict( + prediction_service.BatchPredictRequest(), + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.PredictRequest, + dict, +]) +def test_predict_rest(request_type): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = prediction_service.PredictResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = prediction_service.PredictResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.predict(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, prediction_service.PredictResponse) + + +def test_predict_rest_required_fields(request_type=prediction_service.PredictRequest): + transport_class = transports.PredictionServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).predict._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).predict._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = prediction_service.PredictResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = prediction_service.PredictResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.predict(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_predict_rest_unset_required_fields(): + transport = transports.PredictionServiceRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.predict._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "payload", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_predict_rest_interceptors(null_interceptor): + transport = transports.PredictionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.PredictionServiceRestInterceptor(), + ) + client = PredictionServiceClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.PredictionServiceRestInterceptor, "post_predict") as post, \ + mock.patch.object(transports.PredictionServiceRestInterceptor, "pre_predict") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = prediction_service.PredictRequest.pb(prediction_service.PredictRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = prediction_service.PredictResponse.to_json(prediction_service.PredictResponse()) + + request = prediction_service.PredictRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = prediction_service.PredictResponse() + + client.predict(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_predict_rest_bad_request(transport: str = 'rest', request_type=prediction_service.PredictRequest): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.predict(request) + + +def test_predict_rest_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = prediction_service.PredictResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = prediction_service.PredictResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.predict(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:predict" % client.transport._host, args[1]) + + +def test_predict_rest_flattened_error(transport: str = 'rest'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.predict( + prediction_service.PredictRequest(), + name='name_value', + payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), + params={'key_value': 'value_value'}, + ) + + +def test_predict_rest_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.BatchPredictRequest, + dict, +]) +def test_batch_predict_rest(request_type): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.batch_predict(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_batch_predict_rest_required_fields(request_type=prediction_service.BatchPredictRequest): + transport_class = transports.PredictionServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).batch_predict._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).batch_predict._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.batch_predict(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_batch_predict_rest_unset_required_fields(): + transport = transports.PredictionServiceRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.batch_predict._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "inputConfig", "outputConfig", "params", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_predict_rest_interceptors(null_interceptor): + transport = transports.PredictionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.PredictionServiceRestInterceptor(), + ) + client = PredictionServiceClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.PredictionServiceRestInterceptor, "post_batch_predict") as post, \ + mock.patch.object(transports.PredictionServiceRestInterceptor, "pre_batch_predict") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = prediction_service.BatchPredictRequest.pb(prediction_service.BatchPredictRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = prediction_service.BatchPredictRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.batch_predict(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_predict_rest_bad_request(transport: str = 'rest', request_type=prediction_service.BatchPredictRequest): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_predict(request) + + +def test_batch_predict_rest_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.batch_predict(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:batchPredict" % client.transport._host, args[1]) + + +def test_batch_predict_rest_flattened_error(transport: str = 'rest'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_predict( + prediction_service.BatchPredictRequest(), + name='name_value', + input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), + output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + params={'key_value': 'value_value'}, + ) + + +def test_batch_predict_rest_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PredictionServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PredictionServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + transports.PredictionServiceRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "rest", +]) +def test_transport_kind(transport_name): + transport = PredictionServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PredictionServiceGrpcTransport, + ) + +def test_prediction_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_prediction_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'predict', + 'batch_predict', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_prediction_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_prediction_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport() + adc.assert_called_once() + + +def test_prediction_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PredictionServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + ], +) +def test_prediction_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + transports.PredictionServiceRestTransport, + ], +) +def test_prediction_service_transport_auth_gdch_credentials(transport_class): + host = 'https://language.com' + api_audience_tests = [None, 'https://language2.com'] + api_audience_expect = [host, 'https://language2.com'] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with( + e + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.PredictionServiceGrpcTransport, grpc_helpers), + (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "automl.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="automl.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + +def test_prediction_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.PredictionServiceRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_prediction_service_rest_lro_client(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", + "rest", +]) +def test_prediction_service_host_no_port(transport_name): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'automl.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else 'https://automl.googleapis.com' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", + "rest", +]) +def test_prediction_service_host_with_port(transport_name): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'automl.googleapis.com:8000' + if transport_name in ['grpc', 'grpc_asyncio'] + else 'https://automl.googleapis.com:8000' + ) + +@pytest.mark.parametrize("transport_name", [ + "rest", +]) +def test_prediction_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = PredictionServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = PredictionServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.predict._session + session2 = client2.transport.predict._session + assert session1 != session2 + session1 = client1.transport.batch_predict._session + session2 = client2.transport.batch_predict._session + assert session1 != session2 +def test_prediction_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.PredictionServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_prediction_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.PredictionServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_prediction_service_grpc_lro_client(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_prediction_service_grpc_lro_async_client(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = PredictionServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = PredictionServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_model_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PredictionServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = PredictionServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = PredictionServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = PredictionServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PredictionServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = PredictionServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = PredictionServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = PredictionServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PredictionServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = PredictionServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = PredictionServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + 'grpc', + ] + for transport in transports: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) From f01752015ebf7e7289a31c13ff72de0a8f3c04fb Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 17 Oct 2023 20:05:40 +0000 Subject: [PATCH 2/2] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot=20po?= =?UTF-8?q?st-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- owl-bot-staging/v1/.coveragerc | 13 - owl-bot-staging/v1/.flake8 | 33 - owl-bot-staging/v1/MANIFEST.in | 2 - owl-bot-staging/v1/README.rst | 49 - owl-bot-staging/v1/docs/_static/custom.css | 3 - owl-bot-staging/v1/docs/automl_v1/auto_ml.rst | 10 - .../v1/docs/automl_v1/prediction_service.rst | 6 - .../v1/docs/automl_v1/services.rst | 7 - owl-bot-staging/v1/docs/automl_v1/types.rst | 6 - owl-bot-staging/v1/docs/conf.py | 376 - owl-bot-staging/v1/docs/index.rst | 7 - .../v1/google/cloud/automl/__init__.py | 195 - .../v1/google/cloud/automl/gapic_version.py | 16 - .../v1/google/cloud/automl/py.typed | 2 - .../v1/google/cloud/automl_v1/__init__.py | 196 - .../cloud/automl_v1/gapic_metadata.json | 347 - .../google/cloud/automl_v1/gapic_version.py | 16 - .../v1/google/cloud/automl_v1/py.typed | 2 - .../cloud/automl_v1/services/__init__.py | 15 - .../automl_v1/services/auto_ml/__init__.py | 22 - .../services/auto_ml/async_client.py | 2507 --- .../automl_v1/services/auto_ml/client.py | 2675 --- .../automl_v1/services/auto_ml/pagers.py | 384 - .../services/auto_ml/transports/__init__.py | 38 - .../services/auto_ml/transports/base.py | 462 - .../services/auto_ml/transports/grpc.py | 796 - .../auto_ml/transports/grpc_asyncio.py | 795 - .../services/auto_ml/transports/rest.py | 2366 --- .../services/prediction_service/__init__.py | 22 - .../prediction_service/async_client.py | 656 - .../services/prediction_service/client.py | 858 - .../prediction_service/transports/__init__.py | 38 - .../prediction_service/transports/base.py | 169 - .../prediction_service/transports/grpc.py | 367 - .../transports/grpc_asyncio.py | 366 - .../prediction_service/transports/rest.py | 484 - .../google/cloud/automl_v1/types/__init__.py | 220 - .../automl_v1/types/annotation_payload.py | 126 - .../cloud/automl_v1/types/annotation_spec.py | 61 - .../cloud/automl_v1/types/classification.py | 310 - .../cloud/automl_v1/types/data_items.py | 337 - .../google/cloud/automl_v1/types/dataset.py | 181 - 
.../google/cloud/automl_v1/types/detection.py | 165 - .../google/cloud/automl_v1/types/geometry.py | 75 - .../v1/google/cloud/automl_v1/types/image.py | 318 - .../v1/google/cloud/automl_v1/types/io.py | 1572 -- .../v1/google/cloud/automl_v1/types/model.py | 201 - .../cloud/automl_v1/types/model_evaluation.py | 167 - .../cloud/automl_v1/types/operations.py | 330 - .../automl_v1/types/prediction_service.py | 302 - .../google/cloud/automl_v1/types/service.py | 621 - .../v1/google/cloud/automl_v1/types/text.py | 104 - .../cloud/automl_v1/types/text_extraction.py | 125 - .../cloud/automl_v1/types/text_segment.py | 63 - .../cloud/automl_v1/types/text_sentiment.py | 132 - .../cloud/automl_v1/types/translation.py | 125 - owl-bot-staging/v1/mypy.ini | 3 - owl-bot-staging/v1/noxfile.py | 184 - ..._generated_auto_ml_create_dataset_async.py | 61 - ...1_generated_auto_ml_create_dataset_sync.py | 61 - ...v1_generated_auto_ml_create_model_async.py | 56 - ..._v1_generated_auto_ml_create_model_sync.py | 56 - ..._generated_auto_ml_delete_dataset_async.py | 56 - ...1_generated_auto_ml_delete_dataset_sync.py | 56 - ...v1_generated_auto_ml_delete_model_async.py | 56 - ..._v1_generated_auto_ml_delete_model_sync.py | 56 - ...v1_generated_auto_ml_deploy_model_async.py | 56 - ..._v1_generated_auto_ml_deploy_model_sync.py | 56 - ..._v1_generated_auto_ml_export_data_async.py | 60 - ...l_v1_generated_auto_ml_export_data_sync.py | 60 - ...v1_generated_auto_ml_export_model_async.py | 60 - ..._v1_generated_auto_ml_export_model_sync.py | 60 - ...rated_auto_ml_get_annotation_spec_async.py | 52 - ...erated_auto_ml_get_annotation_spec_sync.py | 52 - ..._v1_generated_auto_ml_get_dataset_async.py | 52 - ...l_v1_generated_auto_ml_get_dataset_sync.py | 52 - ...ml_v1_generated_auto_ml_get_model_async.py | 52 - ...ated_auto_ml_get_model_evaluation_async.py | 52 - ...rated_auto_ml_get_model_evaluation_sync.py | 52 - ...oml_v1_generated_auto_ml_get_model_sync.py | 52 - ..._v1_generated_auto_ml_import_data_async.py | 60 - ...l_v1_generated_auto_ml_import_data_sync.py | 60 - ...1_generated_auto_ml_list_datasets_async.py | 53 - ...v1_generated_auto_ml_list_datasets_sync.py | 53 - ...ed_auto_ml_list_model_evaluations_async.py | 54 - ...ted_auto_ml_list_model_evaluations_sync.py | 54 - ..._v1_generated_auto_ml_list_models_async.py | 53 - ...l_v1_generated_auto_ml_list_models_sync.py | 53 - ..._generated_auto_ml_undeploy_model_async.py | 56 - ...1_generated_auto_ml_undeploy_model_sync.py | 56 - ..._generated_auto_ml_update_dataset_async.py | 56 - ...1_generated_auto_ml_update_dataset_sync.py | 56 - ...v1_generated_auto_ml_update_model_async.py | 51 - ..._v1_generated_auto_ml_update_model_sync.py | 51 - ..._prediction_service_batch_predict_async.py | 64 - ...d_prediction_service_batch_predict_sync.py | 64 - ...erated_prediction_service_predict_async.py | 56 - ...nerated_prediction_service_predict_sync.py | 56 - ...ippet_metadata_google.cloud.automl.v1.json | 3339 ---- .../v1/scripts/fixup_automl_v1_keywords.py | 195 - owl-bot-staging/v1/setup.py | 90 - .../v1/testing/constraints-3.10.txt | 6 - .../v1/testing/constraints-3.11.txt | 6 - .../v1/testing/constraints-3.12.txt | 6 - .../v1/testing/constraints-3.7.txt | 9 - .../v1/testing/constraints-3.8.txt | 6 - .../v1/testing/constraints-3.9.txt | 6 - owl-bot-staging/v1/tests/__init__.py | 16 - owl-bot-staging/v1/tests/unit/__init__.py | 16 - .../v1/tests/unit/gapic/__init__.py | 16 - .../v1/tests/unit/gapic/automl_v1/__init__.py | 16 - .../unit/gapic/automl_v1/test_auto_ml.py | 10997 
------------ .../automl_v1/test_prediction_service.py | 2269 --- owl-bot-staging/v1beta1/.coveragerc | 13 - owl-bot-staging/v1beta1/.flake8 | 33 - owl-bot-staging/v1beta1/MANIFEST.in | 2 - owl-bot-staging/v1beta1/README.rst | 49 - .../v1beta1/docs/_static/custom.css | 3 - .../v1beta1/docs/automl_v1beta1/auto_ml.rst | 10 - .../automl_v1beta1/prediction_service.rst | 6 - .../v1beta1/docs/automl_v1beta1/services.rst | 7 - .../v1beta1/docs/automl_v1beta1/types.rst | 6 - owl-bot-staging/v1beta1/docs/conf.py | 376 - owl-bot-staging/v1beta1/docs/index.rst | 7 - .../v1beta1/google/cloud/automl/__init__.py | 275 - .../google/cloud/automl/gapic_version.py | 16 - .../v1beta1/google/cloud/automl/py.typed | 2 - .../google/cloud/automl_v1beta1/__init__.py | 276 - .../cloud/automl_v1beta1/gapic_metadata.json | 437 - .../cloud/automl_v1beta1/gapic_version.py | 16 - .../google/cloud/automl_v1beta1/py.typed | 2 - .../cloud/automl_v1beta1/services/__init__.py | 15 - .../services/auto_ml/__init__.py | 22 - .../services/auto_ml/async_client.py | 3170 ---- .../automl_v1beta1/services/auto_ml/client.py | 3335 ---- .../automl_v1beta1/services/auto_ml/pagers.py | 628 - .../services/auto_ml/transports/__init__.py | 38 - .../services/auto_ml/transports/base.py | 570 - .../services/auto_ml/transports/grpc.py | 971 -- .../auto_ml/transports/grpc_asyncio.py | 970 -- .../services/auto_ml/transports/rest.py | 3091 ---- .../services/prediction_service/__init__.py | 22 - .../prediction_service/async_client.py | 621 - .../services/prediction_service/client.py | 823 - .../prediction_service/transports/__init__.py | 38 - .../prediction_service/transports/base.py | 169 - .../prediction_service/transports/grpc.py | 348 - .../transports/grpc_asyncio.py | 347 - .../prediction_service/transports/rest.py | 484 - .../cloud/automl_v1beta1/types/__init__.py | 318 - .../types/annotation_payload.py | 158 - .../automl_v1beta1/types/annotation_spec.py | 62 - .../automl_v1beta1/types/classification.py | 379 - .../cloud/automl_v1beta1/types/column_spec.py | 120 - .../cloud/automl_v1beta1/types/data_items.py | 398 - .../cloud/automl_v1beta1/types/data_stats.py | 361 - .../cloud/automl_v1beta1/types/data_types.py | 180 - .../cloud/automl_v1beta1/types/dataset.py | 198 - .../cloud/automl_v1beta1/types/detection.py | 264 - .../cloud/automl_v1beta1/types/geometry.py | 75 - .../cloud/automl_v1beta1/types/image.py | 304 - .../google/cloud/automl_v1beta1/types/io.py | 1253 -- .../cloud/automl_v1beta1/types/model.py | 208 - .../automl_v1beta1/types/model_evaluation.py | 196 - .../cloud/automl_v1beta1/types/operations.py | 392 - .../types/prediction_service.py | 285 - .../cloud/automl_v1beta1/types/ranges.py | 51 - .../cloud/automl_v1beta1/types/regression.py | 71 - .../cloud/automl_v1beta1/types/service.py | 874 - .../cloud/automl_v1beta1/types/table_spec.py | 111 - .../cloud/automl_v1beta1/types/tables.py | 426 - .../cloud/automl_v1beta1/types/temporal.py | 60 - .../google/cloud/automl_v1beta1/types/text.py | 119 - .../automl_v1beta1/types/text_extraction.py | 125 - .../automl_v1beta1/types/text_segment.py | 63 - .../automl_v1beta1/types/text_sentiment.py | 139 - .../cloud/automl_v1beta1/types/translation.py | 125 - .../cloud/automl_v1beta1/types/video.py | 56 - owl-bot-staging/v1beta1/mypy.ini | 3 - owl-bot-staging/v1beta1/noxfile.py | 184 - ..._generated_auto_ml_create_dataset_async.py | 57 - ...1_generated_auto_ml_create_dataset_sync.py | 57 - ...a1_generated_auto_ml_create_model_async.py | 56 - ...ta1_generated_auto_ml_create_model_sync.py | 56 
- ..._generated_auto_ml_delete_dataset_async.py | 56 - ...1_generated_auto_ml_delete_dataset_sync.py | 56 - ...a1_generated_auto_ml_delete_model_async.py | 56 - ...ta1_generated_auto_ml_delete_model_sync.py | 56 - ...a1_generated_auto_ml_deploy_model_async.py | 56 - ...ta1_generated_auto_ml_deploy_model_sync.py | 56 - ...ta1_generated_auto_ml_export_data_async.py | 56 - ...eta1_generated_auto_ml_export_data_sync.py | 56 - ...auto_ml_export_evaluated_examples_async.py | 56 - ..._auto_ml_export_evaluated_examples_sync.py | 56 - ...a1_generated_auto_ml_export_model_async.py | 56 - ...ta1_generated_auto_ml_export_model_sync.py | 56 - ...rated_auto_ml_get_annotation_spec_async.py | 52 - ...erated_auto_ml_get_annotation_spec_sync.py | 52 - ...generated_auto_ml_get_column_spec_async.py | 52 - ..._generated_auto_ml_get_column_spec_sync.py | 52 - ...ta1_generated_auto_ml_get_dataset_async.py | 52 - ...eta1_generated_auto_ml_get_dataset_sync.py | 52 - ...beta1_generated_auto_ml_get_model_async.py | 52 - ...ated_auto_ml_get_model_evaluation_async.py | 52 - ...rated_auto_ml_get_model_evaluation_sync.py | 52 - ...1beta1_generated_auto_ml_get_model_sync.py | 52 - ..._generated_auto_ml_get_table_spec_async.py | 52 - ...1_generated_auto_ml_get_table_spec_sync.py | 52 - ...ta1_generated_auto_ml_import_data_async.py | 56 - ...eta1_generated_auto_ml_import_data_sync.py | 56 - ...nerated_auto_ml_list_column_specs_async.py | 53 - ...enerated_auto_ml_list_column_specs_sync.py | 53 - ...1_generated_auto_ml_list_datasets_async.py | 53 - ...a1_generated_auto_ml_list_datasets_sync.py | 53 - ...ed_auto_ml_list_model_evaluations_async.py | 53 - ...ted_auto_ml_list_model_evaluations_sync.py | 53 - ...ta1_generated_auto_ml_list_models_async.py | 53 - ...eta1_generated_auto_ml_list_models_sync.py | 53 - ...enerated_auto_ml_list_table_specs_async.py | 53 - ...generated_auto_ml_list_table_specs_sync.py | 53 - ..._generated_auto_ml_undeploy_model_async.py | 56 - ...1_generated_auto_ml_undeploy_model_sync.py | 56 - ...erated_auto_ml_update_column_spec_async.py | 51 - ...nerated_auto_ml_update_column_spec_sync.py | 51 - ..._generated_auto_ml_update_dataset_async.py | 56 - ...1_generated_auto_ml_update_dataset_sync.py | 56 - ...nerated_auto_ml_update_table_spec_async.py | 51 - ...enerated_auto_ml_update_table_spec_sync.py | 51 - ..._prediction_service_batch_predict_async.py | 56 - ...d_prediction_service_batch_predict_sync.py | 56 - ...erated_prediction_service_predict_async.py | 56 - ...nerated_prediction_service_predict_sync.py | 56 - ..._metadata_google.cloud.automl.v1beta1.json | 4289 ----- .../scripts/fixup_automl_v1beta1_keywords.py | 201 - owl-bot-staging/v1beta1/setup.py | 90 - .../v1beta1/testing/constraints-3.10.txt | 6 - .../v1beta1/testing/constraints-3.11.txt | 6 - .../v1beta1/testing/constraints-3.12.txt | 6 - .../v1beta1/testing/constraints-3.7.txt | 9 - .../v1beta1/testing/constraints-3.8.txt | 6 - .../v1beta1/testing/constraints-3.9.txt | 6 - owl-bot-staging/v1beta1/tests/__init__.py | 16 - .../v1beta1/tests/unit/__init__.py | 16 - .../v1beta1/tests/unit/gapic/__init__.py | 16 - .../unit/gapic/automl_v1beta1/__init__.py | 16 - .../unit/gapic/automl_v1beta1/test_auto_ml.py | 14494 ---------------- .../automl_v1beta1/test_prediction_service.py | 2270 --- ...ippet_metadata_google.cloud.automl.v1.json | 2 +- ..._metadata_google.cloud.automl.v1beta1.json | 2 +- tests/unit/gapic/automl_v1/test_auto_ml.py | 36 +- .../unit/gapic/automl_v1beta1/test_auto_ml.py | 45 +- 251 files changed, 56 insertions(+), 88301 
deletions(-) delete mode 100644 owl-bot-staging/v1/.coveragerc delete mode 100644 owl-bot-staging/v1/.flake8 delete mode 100644 owl-bot-staging/v1/MANIFEST.in delete mode 100644 owl-bot-staging/v1/README.rst delete mode 100644 owl-bot-staging/v1/docs/_static/custom.css delete mode 100644 owl-bot-staging/v1/docs/automl_v1/auto_ml.rst delete mode 100644 owl-bot-staging/v1/docs/automl_v1/prediction_service.rst delete mode 100644 owl-bot-staging/v1/docs/automl_v1/services.rst delete mode 100644 owl-bot-staging/v1/docs/automl_v1/types.rst delete mode 100644 owl-bot-staging/v1/docs/conf.py delete mode 100644 owl-bot-staging/v1/docs/index.rst delete mode 100644 owl-bot-staging/v1/google/cloud/automl/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl/gapic_version.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/gapic_version.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/rest.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/rest.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_payload.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_spec.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/data_items.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/dataset.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/detection.py delete mode 100644 
owl-bot-staging/v1/google/cloud/automl_v1/types/geometry.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/image.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/io.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/model.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/model_evaluation.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/operations.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/prediction_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/service.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/text.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/text_segment.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1/google/cloud/automl_v1/types/translation.py delete mode 100644 owl-bot-staging/v1/mypy.ini delete mode 100644 owl-bot-staging/v1/noxfile.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_sync.py delete mode 
100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_async.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_sync.py delete mode 100644 owl-bot-staging/v1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json delete mode 100644 owl-bot-staging/v1/scripts/fixup_automl_v1_keywords.py delete mode 100644 owl-bot-staging/v1/setup.py delete mode 100644 owl-bot-staging/v1/testing/constraints-3.10.txt delete mode 100644 owl-bot-staging/v1/testing/constraints-3.11.txt delete mode 100644 owl-bot-staging/v1/testing/constraints-3.12.txt delete mode 100644 owl-bot-staging/v1/testing/constraints-3.7.txt delete mode 100644 owl-bot-staging/v1/testing/constraints-3.8.txt delete mode 100644 owl-bot-staging/v1/testing/constraints-3.9.txt delete mode 100644 owl-bot-staging/v1/tests/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/automl_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_auto_ml.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_prediction_service.py delete mode 100644 owl-bot-staging/v1beta1/.coveragerc delete mode 100644 owl-bot-staging/v1beta1/.flake8 delete mode 100644 owl-bot-staging/v1beta1/MANIFEST.in delete mode 100644 owl-bot-staging/v1beta1/README.rst delete mode 100644 owl-bot-staging/v1beta1/docs/_static/custom.css delete 
mode 100644 owl-bot-staging/v1beta1/docs/automl_v1beta1/auto_ml.rst delete mode 100644 owl-bot-staging/v1beta1/docs/automl_v1beta1/prediction_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/automl_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/automl_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/docs/conf.py delete mode 100644 owl-bot-staging/v1beta1/docs/index.rst delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl/gapic_version.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_version.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_payload.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_spec.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/column_spec.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_items.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_stats.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_types.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/dataset.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/detection.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/geometry.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/image.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/io.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model_evaluation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/operations.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/prediction_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/ranges.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/regression.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/table_spec.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/tables.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/temporal.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_segment.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/translation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/video.py delete mode 100644 owl-bot-staging/v1beta1/mypy.ini delete mode 100644 owl-bot-staging/v1beta1/noxfile.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_async.py delete mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_async.py delete mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_async.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_sync.py delete mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_automl_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/setup.py delete mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.10.txt delete mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.11.txt delete mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.12.txt delete mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.7.txt delete mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.8.txt delete mode 100644 owl-bot-staging/v1beta1/testing/constraints-3.9.txt delete mode 100644 owl-bot-staging/v1beta1/tests/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_auto_ml.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_prediction_service.py diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc deleted file mode 100644 index 8705cefd..00000000 --- a/owl-bot-staging/v1/.coveragerc +++ /dev/null @@ -1,13 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/automl/__init__.py - google/cloud/automl/gapic_version.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ diff --git a/owl-bot-staging/v1/.flake8 b/owl-bot-staging/v1/.flake8 deleted file mode 100644 index 29227d4c..00000000 --- a/owl-bot-staging/v1/.flake8 +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 
(the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by synthtool. DO NOT EDIT! -[flake8] -ignore = E203, E266, E501, W503 -exclude = - # Exclude generated code. - **/proto/** - **/gapic/** - **/services/** - **/types/** - *_pb2.py - - # Standard linting exemptions. - **/.nox/** - __pycache__, - .git, - *.pyc, - conf.py diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in deleted file mode 100644 index f376b2aa..00000000 --- a/owl-bot-staging/v1/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/automl *.py -recursive-include google/cloud/automl_v1 *.py diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst deleted file mode 100644 index d0dde648..00000000 --- a/owl-bot-staging/v1/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Automl API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Automl API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html -

Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - source <your-env>/bin/activate - <your-env>/bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - <your-env>\Scripts\activate - <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/docs/_static/custom.css b/owl-bot-staging/v1/docs/_static/custom.css deleted file mode 100644 index 06423be0..00000000 --- a/owl-bot-staging/v1/docs/_static/custom.css +++ /dev/null @@ -1,3 +0,0 @@ -dl.field-list > dt { - min-width: 100px -} diff --git a/owl-bot-staging/v1/docs/automl_v1/auto_ml.rst b/owl-bot-staging/v1/docs/automl_v1/auto_ml.rst deleted file mode 100644 index c8994a59..00000000 --- a/owl-bot-staging/v1/docs/automl_v1/auto_ml.rst +++ /dev/null @@ -1,10 +0,0 @@ -AutoMl ------------------------- - -.. automodule:: google.cloud.automl_v1.services.auto_ml - :members: - :inherited-members: - -.. 
automodule:: google.cloud.automl_v1.services.auto_ml.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/automl_v1/prediction_service.rst b/owl-bot-staging/v1/docs/automl_v1/prediction_service.rst deleted file mode 100644 index d8f6da92..00000000 --- a/owl-bot-staging/v1/docs/automl_v1/prediction_service.rst +++ /dev/null @@ -1,6 +0,0 @@ -PredictionService ------------------------------------ - -.. automodule:: google.cloud.automl_v1.services.prediction_service - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/automl_v1/services.rst b/owl-bot-staging/v1/docs/automl_v1/services.rst deleted file mode 100644 index ce8e2c3d..00000000 --- a/owl-bot-staging/v1/docs/automl_v1/services.rst +++ /dev/null @@ -1,7 +0,0 @@ -Services for Google Cloud Automl v1 API -======================================= -.. toctree:: - :maxdepth: 2 - - auto_ml - prediction_service diff --git a/owl-bot-staging/v1/docs/automl_v1/types.rst b/owl-bot-staging/v1/docs/automl_v1/types.rst deleted file mode 100644 index 14a31a9e..00000000 --- a/owl-bot-staging/v1/docs/automl_v1/types.rst +++ /dev/null @@ -1,6 +0,0 @@ -Types for Google Cloud Automl v1 API -==================================== - -.. automodule:: google.cloud.automl_v1.types - :members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py deleted file mode 100644 index 708bcaa7..00000000 --- a/owl-bot-staging/v1/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-automl documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "4.0.1" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. 
-templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The root toctree document. -root_doc = "index" - -# General information about the project. -project = u"google-cloud-automl" -copyright = u"2023, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = 'en' - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. 
-# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-automl-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). 
- # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - root_doc, - "google-cloud-automl.tex", - u"google-cloud-automl Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - root_doc, - "google-cloud-automl", - u"Google Cloud Automl Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - root_doc, - "google-cloud-automl", - u"google-cloud-automl Documentation", - author, - "google-cloud-automl", - "GAPIC library for Google Cloud Automl API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst deleted file mode 100644 index b5adf159..00000000 --- a/owl-bot-staging/v1/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - automl_v1/services - automl_v1/types diff --git a/owl-bot-staging/v1/google/cloud/automl/__init__.py b/owl-bot-staging/v1/google/cloud/automl/__init__.py deleted file mode 100644 index e8f62a9d..00000000 --- a/owl-bot-staging/v1/google/cloud/automl/__init__.py +++ /dev/null @@ -1,195 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from google.cloud.automl import gapic_version as package_version - -__version__ = package_version.__version__ - - -from google.cloud.automl_v1.services.auto_ml.client import AutoMlClient -from google.cloud.automl_v1.services.auto_ml.async_client import AutoMlAsyncClient -from google.cloud.automl_v1.services.prediction_service.client import PredictionServiceClient -from google.cloud.automl_v1.services.prediction_service.async_client import PredictionServiceAsyncClient - -from google.cloud.automl_v1.types.annotation_payload import AnnotationPayload -from google.cloud.automl_v1.types.annotation_spec import AnnotationSpec -from google.cloud.automl_v1.types.classification import ClassificationAnnotation -from google.cloud.automl_v1.types.classification import ClassificationEvaluationMetrics -from google.cloud.automl_v1.types.classification import ClassificationType -from google.cloud.automl_v1.types.data_items import Document -from google.cloud.automl_v1.types.data_items import DocumentDimensions -from google.cloud.automl_v1.types.data_items import ExamplePayload -from google.cloud.automl_v1.types.data_items import Image -from google.cloud.automl_v1.types.data_items import TextSnippet -from google.cloud.automl_v1.types.dataset import Dataset -from google.cloud.automl_v1.types.detection import BoundingBoxMetricsEntry -from google.cloud.automl_v1.types.detection import ImageObjectDetectionAnnotation -from google.cloud.automl_v1.types.detection import ImageObjectDetectionEvaluationMetrics -from google.cloud.automl_v1.types.geometry import BoundingPoly -from google.cloud.automl_v1.types.geometry import NormalizedVertex -from google.cloud.automl_v1.types.image import ImageClassificationDatasetMetadata -from google.cloud.automl_v1.types.image import ImageClassificationModelDeploymentMetadata -from google.cloud.automl_v1.types.image import ImageClassificationModelMetadata -from google.cloud.automl_v1.types.image import ImageObjectDetectionDatasetMetadata -from google.cloud.automl_v1.types.image import ImageObjectDetectionModelDeploymentMetadata -from google.cloud.automl_v1.types.image import ImageObjectDetectionModelMetadata -from google.cloud.automl_v1.types.io import BatchPredictInputConfig -from google.cloud.automl_v1.types.io import BatchPredictOutputConfig -from google.cloud.automl_v1.types.io import DocumentInputConfig -from google.cloud.automl_v1.types.io import GcsDestination -from google.cloud.automl_v1.types.io import GcsSource -from google.cloud.automl_v1.types.io import InputConfig -from google.cloud.automl_v1.types.io import ModelExportOutputConfig -from google.cloud.automl_v1.types.io import OutputConfig -from google.cloud.automl_v1.types.model import Model -from google.cloud.automl_v1.types.model_evaluation import ModelEvaluation -from google.cloud.automl_v1.types.operations import BatchPredictOperationMetadata -from google.cloud.automl_v1.types.operations import CreateDatasetOperationMetadata -from google.cloud.automl_v1.types.operations import CreateModelOperationMetadata -from google.cloud.automl_v1.types.operations import DeleteOperationMetadata -from google.cloud.automl_v1.types.operations import DeployModelOperationMetadata -from google.cloud.automl_v1.types.operations import ExportDataOperationMetadata -from google.cloud.automl_v1.types.operations import ExportModelOperationMetadata -from google.cloud.automl_v1.types.operations import ImportDataOperationMetadata -from google.cloud.automl_v1.types.operations import OperationMetadata -from 
google.cloud.automl_v1.types.operations import UndeployModelOperationMetadata -from google.cloud.automl_v1.types.prediction_service import BatchPredictRequest -from google.cloud.automl_v1.types.prediction_service import BatchPredictResult -from google.cloud.automl_v1.types.prediction_service import PredictRequest -from google.cloud.automl_v1.types.prediction_service import PredictResponse -from google.cloud.automl_v1.types.service import CreateDatasetRequest -from google.cloud.automl_v1.types.service import CreateModelRequest -from google.cloud.automl_v1.types.service import DeleteDatasetRequest -from google.cloud.automl_v1.types.service import DeleteModelRequest -from google.cloud.automl_v1.types.service import DeployModelRequest -from google.cloud.automl_v1.types.service import ExportDataRequest -from google.cloud.automl_v1.types.service import ExportModelRequest -from google.cloud.automl_v1.types.service import GetAnnotationSpecRequest -from google.cloud.automl_v1.types.service import GetDatasetRequest -from google.cloud.automl_v1.types.service import GetModelEvaluationRequest -from google.cloud.automl_v1.types.service import GetModelRequest -from google.cloud.automl_v1.types.service import ImportDataRequest -from google.cloud.automl_v1.types.service import ListDatasetsRequest -from google.cloud.automl_v1.types.service import ListDatasetsResponse -from google.cloud.automl_v1.types.service import ListModelEvaluationsRequest -from google.cloud.automl_v1.types.service import ListModelEvaluationsResponse -from google.cloud.automl_v1.types.service import ListModelsRequest -from google.cloud.automl_v1.types.service import ListModelsResponse -from google.cloud.automl_v1.types.service import UndeployModelRequest -from google.cloud.automl_v1.types.service import UpdateDatasetRequest -from google.cloud.automl_v1.types.service import UpdateModelRequest -from google.cloud.automl_v1.types.text import TextClassificationDatasetMetadata -from google.cloud.automl_v1.types.text import TextClassificationModelMetadata -from google.cloud.automl_v1.types.text import TextExtractionDatasetMetadata -from google.cloud.automl_v1.types.text import TextExtractionModelMetadata -from google.cloud.automl_v1.types.text import TextSentimentDatasetMetadata -from google.cloud.automl_v1.types.text import TextSentimentModelMetadata -from google.cloud.automl_v1.types.text_extraction import TextExtractionAnnotation -from google.cloud.automl_v1.types.text_extraction import TextExtractionEvaluationMetrics -from google.cloud.automl_v1.types.text_segment import TextSegment -from google.cloud.automl_v1.types.text_sentiment import TextSentimentAnnotation -from google.cloud.automl_v1.types.text_sentiment import TextSentimentEvaluationMetrics -from google.cloud.automl_v1.types.translation import TranslationAnnotation -from google.cloud.automl_v1.types.translation import TranslationDatasetMetadata -from google.cloud.automl_v1.types.translation import TranslationEvaluationMetrics -from google.cloud.automl_v1.types.translation import TranslationModelMetadata - -__all__ = ('AutoMlClient', - 'AutoMlAsyncClient', - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', - 'AnnotationPayload', - 'AnnotationSpec', - 'ClassificationAnnotation', - 'ClassificationEvaluationMetrics', - 'ClassificationType', - 'Document', - 'DocumentDimensions', - 'ExamplePayload', - 'Image', - 'TextSnippet', - 'Dataset', - 'BoundingBoxMetricsEntry', - 'ImageObjectDetectionAnnotation', - 'ImageObjectDetectionEvaluationMetrics', - 'BoundingPoly', - 
'NormalizedVertex', - 'ImageClassificationDatasetMetadata', - 'ImageClassificationModelDeploymentMetadata', - 'ImageClassificationModelMetadata', - 'ImageObjectDetectionDatasetMetadata', - 'ImageObjectDetectionModelDeploymentMetadata', - 'ImageObjectDetectionModelMetadata', - 'BatchPredictInputConfig', - 'BatchPredictOutputConfig', - 'DocumentInputConfig', - 'GcsDestination', - 'GcsSource', - 'InputConfig', - 'ModelExportOutputConfig', - 'OutputConfig', - 'Model', - 'ModelEvaluation', - 'BatchPredictOperationMetadata', - 'CreateDatasetOperationMetadata', - 'CreateModelOperationMetadata', - 'DeleteOperationMetadata', - 'DeployModelOperationMetadata', - 'ExportDataOperationMetadata', - 'ExportModelOperationMetadata', - 'ImportDataOperationMetadata', - 'OperationMetadata', - 'UndeployModelOperationMetadata', - 'BatchPredictRequest', - 'BatchPredictResult', - 'PredictRequest', - 'PredictResponse', - 'CreateDatasetRequest', - 'CreateModelRequest', - 'DeleteDatasetRequest', - 'DeleteModelRequest', - 'DeployModelRequest', - 'ExportDataRequest', - 'ExportModelRequest', - 'GetAnnotationSpecRequest', - 'GetDatasetRequest', - 'GetModelEvaluationRequest', - 'GetModelRequest', - 'ImportDataRequest', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'UndeployModelRequest', - 'UpdateDatasetRequest', - 'UpdateModelRequest', - 'TextClassificationDatasetMetadata', - 'TextClassificationModelMetadata', - 'TextExtractionDatasetMetadata', - 'TextExtractionModelMetadata', - 'TextSentimentDatasetMetadata', - 'TextSentimentModelMetadata', - 'TextExtractionAnnotation', - 'TextExtractionEvaluationMetrics', - 'TextSegment', - 'TextSentimentAnnotation', - 'TextSentimentEvaluationMetrics', - 'TranslationAnnotation', - 'TranslationDatasetMetadata', - 'TranslationEvaluationMetrics', - 'TranslationModelMetadata', -) diff --git a/owl-bot-staging/v1/google/cloud/automl/gapic_version.py b/owl-bot-staging/v1/google/cloud/automl/gapic_version.py deleted file mode 100644 index 360a0d13..00000000 --- a/owl-bot-staging/v1/google/cloud/automl/gapic_version.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/v1/google/cloud/automl/py.typed b/owl-bot-staging/v1/google/cloud/automl/py.typed deleted file mode 100644 index 0560ba18..00000000 --- a/owl-bot-staging/v1/google/cloud/automl/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-automl package uses inline types. 
diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/__init__.py deleted file mode 100644 index eea87cfa..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/__init__.py +++ /dev/null @@ -1,196 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from google.cloud.automl_v1 import gapic_version as package_version - -__version__ = package_version.__version__ - - -from .services.auto_ml import AutoMlClient -from .services.auto_ml import AutoMlAsyncClient -from .services.prediction_service import PredictionServiceClient -from .services.prediction_service import PredictionServiceAsyncClient - -from .types.annotation_payload import AnnotationPayload -from .types.annotation_spec import AnnotationSpec -from .types.classification import ClassificationAnnotation -from .types.classification import ClassificationEvaluationMetrics -from .types.classification import ClassificationType -from .types.data_items import Document -from .types.data_items import DocumentDimensions -from .types.data_items import ExamplePayload -from .types.data_items import Image -from .types.data_items import TextSnippet -from .types.dataset import Dataset -from .types.detection import BoundingBoxMetricsEntry -from .types.detection import ImageObjectDetectionAnnotation -from .types.detection import ImageObjectDetectionEvaluationMetrics -from .types.geometry import BoundingPoly -from .types.geometry import NormalizedVertex -from .types.image import ImageClassificationDatasetMetadata -from .types.image import ImageClassificationModelDeploymentMetadata -from .types.image import ImageClassificationModelMetadata -from .types.image import ImageObjectDetectionDatasetMetadata -from .types.image import ImageObjectDetectionModelDeploymentMetadata -from .types.image import ImageObjectDetectionModelMetadata -from .types.io import BatchPredictInputConfig -from .types.io import BatchPredictOutputConfig -from .types.io import DocumentInputConfig -from .types.io import GcsDestination -from .types.io import GcsSource -from .types.io import InputConfig -from .types.io import ModelExportOutputConfig -from .types.io import OutputConfig -from .types.model import Model -from .types.model_evaluation import ModelEvaluation -from .types.operations import BatchPredictOperationMetadata -from .types.operations import CreateDatasetOperationMetadata -from .types.operations import CreateModelOperationMetadata -from .types.operations import DeleteOperationMetadata -from .types.operations import DeployModelOperationMetadata -from .types.operations import ExportDataOperationMetadata -from .types.operations import ExportModelOperationMetadata -from .types.operations import ImportDataOperationMetadata -from .types.operations import OperationMetadata -from .types.operations import UndeployModelOperationMetadata -from .types.prediction_service import BatchPredictRequest -from .types.prediction_service import BatchPredictResult -from 
.types.prediction_service import PredictRequest -from .types.prediction_service import PredictResponse -from .types.service import CreateDatasetRequest -from .types.service import CreateModelRequest -from .types.service import DeleteDatasetRequest -from .types.service import DeleteModelRequest -from .types.service import DeployModelRequest -from .types.service import ExportDataRequest -from .types.service import ExportModelRequest -from .types.service import GetAnnotationSpecRequest -from .types.service import GetDatasetRequest -from .types.service import GetModelEvaluationRequest -from .types.service import GetModelRequest -from .types.service import ImportDataRequest -from .types.service import ListDatasetsRequest -from .types.service import ListDatasetsResponse -from .types.service import ListModelEvaluationsRequest -from .types.service import ListModelEvaluationsResponse -from .types.service import ListModelsRequest -from .types.service import ListModelsResponse -from .types.service import UndeployModelRequest -from .types.service import UpdateDatasetRequest -from .types.service import UpdateModelRequest -from .types.text import TextClassificationDatasetMetadata -from .types.text import TextClassificationModelMetadata -from .types.text import TextExtractionDatasetMetadata -from .types.text import TextExtractionModelMetadata -from .types.text import TextSentimentDatasetMetadata -from .types.text import TextSentimentModelMetadata -from .types.text_extraction import TextExtractionAnnotation -from .types.text_extraction import TextExtractionEvaluationMetrics -from .types.text_segment import TextSegment -from .types.text_sentiment import TextSentimentAnnotation -from .types.text_sentiment import TextSentimentEvaluationMetrics -from .types.translation import TranslationAnnotation -from .types.translation import TranslationDatasetMetadata -from .types.translation import TranslationEvaluationMetrics -from .types.translation import TranslationModelMetadata - -__all__ = ( - 'AutoMlAsyncClient', - 'PredictionServiceAsyncClient', -'AnnotationPayload', -'AnnotationSpec', -'AutoMlClient', -'BatchPredictInputConfig', -'BatchPredictOperationMetadata', -'BatchPredictOutputConfig', -'BatchPredictRequest', -'BatchPredictResult', -'BoundingBoxMetricsEntry', -'BoundingPoly', -'ClassificationAnnotation', -'ClassificationEvaluationMetrics', -'ClassificationType', -'CreateDatasetOperationMetadata', -'CreateDatasetRequest', -'CreateModelOperationMetadata', -'CreateModelRequest', -'Dataset', -'DeleteDatasetRequest', -'DeleteModelRequest', -'DeleteOperationMetadata', -'DeployModelOperationMetadata', -'DeployModelRequest', -'Document', -'DocumentDimensions', -'DocumentInputConfig', -'ExamplePayload', -'ExportDataOperationMetadata', -'ExportDataRequest', -'ExportModelOperationMetadata', -'ExportModelRequest', -'GcsDestination', -'GcsSource', -'GetAnnotationSpecRequest', -'GetDatasetRequest', -'GetModelEvaluationRequest', -'GetModelRequest', -'Image', -'ImageClassificationDatasetMetadata', -'ImageClassificationModelDeploymentMetadata', -'ImageClassificationModelMetadata', -'ImageObjectDetectionAnnotation', -'ImageObjectDetectionDatasetMetadata', -'ImageObjectDetectionEvaluationMetrics', -'ImageObjectDetectionModelDeploymentMetadata', -'ImageObjectDetectionModelMetadata', -'ImportDataOperationMetadata', -'ImportDataRequest', -'InputConfig', -'ListDatasetsRequest', -'ListDatasetsResponse', -'ListModelEvaluationsRequest', -'ListModelEvaluationsResponse', -'ListModelsRequest', -'ListModelsResponse', -'Model', 
-'ModelEvaluation', -'ModelExportOutputConfig', -'NormalizedVertex', -'OperationMetadata', -'OutputConfig', -'PredictRequest', -'PredictResponse', -'PredictionServiceClient', -'TextClassificationDatasetMetadata', -'TextClassificationModelMetadata', -'TextExtractionAnnotation', -'TextExtractionDatasetMetadata', -'TextExtractionEvaluationMetrics', -'TextExtractionModelMetadata', -'TextSegment', -'TextSentimentAnnotation', -'TextSentimentDatasetMetadata', -'TextSentimentEvaluationMetrics', -'TextSentimentModelMetadata', -'TextSnippet', -'TranslationAnnotation', -'TranslationDatasetMetadata', -'TranslationEvaluationMetrics', -'TranslationModelMetadata', -'UndeployModelOperationMetadata', -'UndeployModelRequest', -'UpdateDatasetRequest', -'UpdateModelRequest', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/automl_v1/gapic_metadata.json deleted file mode 100644 index 7d017052..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/gapic_metadata.json +++ /dev/null @@ -1,347 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.automl_v1", - "protoPackage": "google.cloud.automl.v1", - "schema": "1.0", - "services": { - "AutoMl": { - "clients": { - "grpc": { - "libraryClient": "AutoMlClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "CreateModel": { - "methods": [ - "create_model" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - } - } - }, - "grpc-async": { - "libraryClient": "AutoMlAsyncClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "CreateModel": { - "methods": [ - "create_model" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - 
"UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - } - } - }, - "rest": { - "libraryClient": "AutoMlClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "CreateModel": { - "methods": [ - "create_model" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - } - } - } - } - }, - "PredictionService": { - "clients": { - "grpc": { - "libraryClient": "PredictionServiceClient", - "rpcs": { - "BatchPredict": { - "methods": [ - "batch_predict" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - } - } - }, - "grpc-async": { - "libraryClient": "PredictionServiceAsyncClient", - "rpcs": { - "BatchPredict": { - "methods": [ - "batch_predict" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - } - } - }, - "rest": { - "libraryClient": "PredictionServiceClient", - "rpcs": { - "BatchPredict": { - "methods": [ - "batch_predict" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/gapic_version.py b/owl-bot-staging/v1/google/cloud/automl_v1/gapic_version.py deleted file mode 100644 index 360a0d13..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/gapic_version.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/py.typed b/owl-bot-staging/v1/google/cloud/automl_v1/py.typed deleted file mode 100644 index 0560ba18..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-automl package uses inline types. 
diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/__init__.py deleted file mode 100644 index 89a37dc9..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/__init__.py deleted file mode 100644 index 8f53357e..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import AutoMlClient -from .async_client import AutoMlAsyncClient - -__all__ = ( - 'AutoMlClient', - 'AutoMlAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/async_client.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/async_client.py deleted file mode 100644 index 5de13fa6..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/async_client.py +++ /dev/null @@ -1,2507 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union - -from google.cloud.automl_v1 import gapic_version as package_version - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.automl_v1.services.auto_ml import pagers -from google.cloud.automl_v1.types import annotation_spec -from google.cloud.automl_v1.types import classification -from google.cloud.automl_v1.types import dataset -from google.cloud.automl_v1.types import dataset as gca_dataset -from google.cloud.automl_v1.types import detection -from google.cloud.automl_v1.types import image -from google.cloud.automl_v1.types import io -from google.cloud.automl_v1.types import model -from google.cloud.automl_v1.types import model as gca_model -from google.cloud.automl_v1.types import model_evaluation -from google.cloud.automl_v1.types import operations -from google.cloud.automl_v1.types import service -from google.cloud.automl_v1.types import text -from google.cloud.automl_v1.types import text_extraction -from google.cloud.automl_v1.types import text_sentiment -from google.cloud.automl_v1.types import translation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport -from .client import AutoMlClient - - -class AutoMlAsyncClient: - """AutoML Server API. - - The resource names are assigned by the server. The server never - reuses names that it has created after the resources with those - names are deleted. - - An ID of a resource is the last element of the item's resource name. - For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or dash-case, either of those cases is accepted. 
- """ - - _client: AutoMlClient - - DEFAULT_ENDPOINT = AutoMlClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = AutoMlClient.DEFAULT_MTLS_ENDPOINT - - annotation_spec_path = staticmethod(AutoMlClient.annotation_spec_path) - parse_annotation_spec_path = staticmethod(AutoMlClient.parse_annotation_spec_path) - dataset_path = staticmethod(AutoMlClient.dataset_path) - parse_dataset_path = staticmethod(AutoMlClient.parse_dataset_path) - model_path = staticmethod(AutoMlClient.model_path) - parse_model_path = staticmethod(AutoMlClient.parse_model_path) - model_evaluation_path = staticmethod(AutoMlClient.model_evaluation_path) - parse_model_evaluation_path = staticmethod(AutoMlClient.parse_model_evaluation_path) - common_billing_account_path = staticmethod(AutoMlClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(AutoMlClient.parse_common_billing_account_path) - common_folder_path = staticmethod(AutoMlClient.common_folder_path) - parse_common_folder_path = staticmethod(AutoMlClient.parse_common_folder_path) - common_organization_path = staticmethod(AutoMlClient.common_organization_path) - parse_common_organization_path = staticmethod(AutoMlClient.parse_common_organization_path) - common_project_path = staticmethod(AutoMlClient.common_project_path) - parse_common_project_path = staticmethod(AutoMlClient.parse_common_project_path) - common_location_path = staticmethod(AutoMlClient.common_location_path) - parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoMlAsyncClient: The constructed client. - """ - return AutoMlClient.from_service_account_info.__func__(AutoMlAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoMlAsyncClient: The constructed client. - """ - return AutoMlClient.from_service_account_file.__func__(AutoMlAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. 
- (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - return AutoMlClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore - - @property - def transport(self) -> AutoMlTransport: - """Returns the transport used by the client instance. - - Returns: - AutoMlTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(AutoMlClient).get_transport_class, type(AutoMlClient)) - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, AutoMlTransport] = "grpc_asyncio", - client_options: Optional[ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the auto ml client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.AutoMlTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - self._client = AutoMlClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_dataset(self, - request: Optional[Union[service.CreateDatasetRequest, dict]] = None, - *, - parent: Optional[str] = None, - dataset: Optional[gca_dataset.Dataset] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_create_dataset(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - dataset = automl_v1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1.CreateDatasetRequest( - parent="parent_value", - dataset=dataset, - ) - - # Make the request - operation = client.create_dataset(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.CreateDatasetRequest, dict]]): - The request object. Request message for - [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. - parent (:class:`str`): - Required. The resource name of the - project to create the dataset for. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (:class:`google.cloud.automl_v1.types.Dataset`): - Required. The dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.automl_v1.types.Dataset` A workspace for solving a single, particular machine learning (ML) problem. - A workspace contains examples that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.CreateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_dataset.Dataset, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def get_dataset(self, - request: Optional[Union[service.GetDatasetRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_get_dataset(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.GetDatasetRequest( - name="name_value", - ) - - # Make the request - response = await client.get_dataset(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.GetDatasetRequest, dict]]): - The request object. Request message for - [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. - name (:class:`str`): - Required. The resource name of the - dataset to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.GetDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_dataset, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_datasets(self, - request: Optional[Union[service.ListDatasetsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsAsyncPager: - r"""Lists datasets in a project. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_list_datasets(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.ListDatasetsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_datasets(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.ListDatasetsRequest, dict]]): - The request object. Request message for - [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - parent (:class:`str`): - Required. The resource name of the - project from which to list datasets. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsAsyncPager: - Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ListDatasetsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_datasets, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDatasetsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_dataset(self, - request: Optional[Union[service.UpdateDatasetRequest, dict]] = None, - *, - dataset: Optional[gca_dataset.Dataset] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_update_dataset(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - dataset = automl_v1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1.UpdateDatasetRequest( - dataset=dataset, - ) - - # Make the request - response = await client.update_dataset(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.UpdateDatasetRequest, dict]]): - The request object. Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] - dataset (:class:`google.cloud.automl_v1.types.Dataset`): - Required. The dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to - the resource. 
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.UpdateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_dataset(self, - request: Optional[Union[service.DeleteDatasetRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a dataset and all of its contents. Returns empty - response in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_delete_dataset(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.DeleteDatasetRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_dataset(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.DeleteDatasetRequest, dict]]): - The request object. Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. 
- name (:class:`str`): - Required. The resource name of the - dataset to delete. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.DeleteDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_dataset, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def import_data(self, - request: Optional[Union[service.ImportDataRequest, dict]] = None, - *, - name: Optional[str] = None, - input_config: Optional[io.InputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A - [schema_inference_version][google.cloud.automl.v1.InputConfig.params] - parameter must be explicitly set. Returns an empty response - in the [response][google.longrunning.Operation.response] - field when it completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_import_data(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - input_config = automl_v1.InputConfig() - input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] - - request = automl_v1.ImportDataRequest( - name="name_value", - input_config=input_config, - ) - - # Make the request - operation = client.import_data(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.ImportDataRequest, dict]]): - The request object. Request message for - [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. - name (:class:`str`): - Required. Dataset name. Dataset must - already exist. All imported annotations - and examples will be added. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - input_config (:class:`google.cloud.automl_v1.types.InputConfig`): - Required. The desired input location - and its domain specific semantics, if - any. - - This corresponds to the ``input_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, input_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ImportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if input_config is not None: - request.input_config = input_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.import_data, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def export_data(self, - request: Optional[Union[service.ExportDataRequest, dict]] = None, - *, - name: Optional[str] = None, - output_config: Optional[io.OutputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports dataset's data to the provided output location. Returns - an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_export_data(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - output_config = automl_v1.OutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.ExportDataRequest( - name="name_value", - output_config=output_config, - ) - - # Make the request - operation = client.export_data(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.ExportDataRequest, dict]]): - The request object. Request message for - [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. - name (:class:`str`): - Required. The resource name of the - dataset. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (:class:`google.cloud.automl_v1.types.OutputConfig`): - Required. The desired output - location. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ExportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_data, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def get_annotation_spec(self, - request: Optional[Union[service.GetAnnotationSpecRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an annotation spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_get_annotation_spec(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.GetAnnotationSpecRequest( - name="name_value", - ) - - # Make the request - response = await client.get_annotation_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.GetAnnotationSpecRequest, dict]]): - The request object. Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. - name (:class:`str`): - Required. The resource name of the - annotation spec to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.AnnotationSpec: - A definition of an annotation spec. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.GetAnnotationSpecRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_annotation_spec, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_model(self, - request: Optional[Union[service.CreateModelRequest, dict]] = None, - *, - parent: Optional[str] = None, - model: Optional[gca_model.Model] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a model. Returns a Model in the - [response][google.longrunning.Operation.response] field when it - completes. When you create a model, several model evaluations - are created for it: a global evaluation, and one evaluation for - each annotation spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_create_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.CreateModelRequest( - parent="parent_value", - ) - - # Make the request - operation = client.create_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.CreateModelRequest, dict]]): - The request object. Request message for - [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. - parent (:class:`str`): - Required. Resource name of the parent - project where the model is being - created. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (:class:`google.cloud.automl_v1.types.Model`): - Required. The model to create. - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.automl_v1.types.Model` API proto - representing a trained machine learning model. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.CreateModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_model.Model, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def get_model(self, - request: Optional[Union[service.GetModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a model. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_get_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.GetModelRequest( - name="name_value", - ) - - # Make the request - response = await client.get_model(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.GetModelRequest, dict]]): - The request object. Request message for - [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. - name (:class:`str`): - Required. Resource name of the model. - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.Model: - API proto representing a trained - machine learning model. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.GetModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_models(self, - request: Optional[Union[service.ListModelsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsAsyncPager: - r"""Lists models. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_list_models(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.ListModelsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_models(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.ListModelsRequest, dict]]): - The request object. Request message for - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - parent (:class:`str`): - Required. Resource name of the - project, from which to list the models. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
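The ``default_retry`` wired into ``get_model`` above (exponential backoff from 0.1s to 60s on ``DeadlineExceeded`` and ``ServiceUnavailable``) can be overridden per call through the ``retry`` and ``timeout`` parameters. A minimal sketch that mirrors those defaults with a longer overall deadline; the model name is a placeholder.

.. code-block:: python

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.cloud import automl_v1


    async def get_model_patiently(client: automl_v1.AutoMlAsyncClient) -> automl_v1.Model:
        # Same predicate and backoff as the generated default, but a 30s deadline.
        custom_retry = retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                core_exceptions.DeadlineExceeded,
                core_exceptions.ServiceUnavailable,
            ),
            deadline=30.0,
        )

        name = "projects/my-project/locations/us-central1/models/TRL123"  # placeholder
        return await client.get_model(name=name, retry=custom_retry, timeout=30.0)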
- - Returns: - google.cloud.automl_v1.services.auto_ml.pagers.ListModelsAsyncPager: - Response message for - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ListModelsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_models, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_model(self, - request: Optional[Union[service.DeleteModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a model. Returns ``google.protobuf.Empty`` in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_delete_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.DeleteModelRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.DeleteModelRequest, dict]]): - The request object. Request message for - [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. 
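``list_models`` above returns a ``ListModelsAsyncPager``; as its docstring notes, iterating the pager resolves additional pages automatically. A small usage sketch with a placeholder parent:

.. code-block:: python

    from google.cloud import automl_v1


    async def print_models(client: automl_v1.AutoMlAsyncClient) -> None:
        parent = "projects/my-project/locations/us-central1"  # placeholder

        # Further pages are fetched lazily while iterating.
        pager = await client.list_models(parent=parent)
        async for model in pager:
            print(model.name, model.display_name)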
- name (:class:`str`): - Required. Resource name of the model - being deleted. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.DeleteModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_model, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def update_model(self, - request: Optional[Union[service.UpdateModelRequest, dict]] = None, - *, - model: Optional[gca_model.Model] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model.Model: - r"""Updates a model. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_update_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.UpdateModelRequest( - ) - - # Make the request - response = await client.update_model(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.UpdateModelRequest, dict]]): - The request object. Request message for - [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] - model (:class:`google.cloud.automl_v1.types.Model`): - Required. The model which replaces - the resource on the server. - - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.Model: - API proto representing a trained - machine learning model. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.UpdateModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model is not None: - request.model = model - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model.name", request.model.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def deploy_model(self, - request: Optional[Union[service.DeployModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deploys a model. If a model is already deployed, deploying it - with the same parameters has no effect. Deploying with different - parametrs (as e.g. changing - [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number]) - will reset the deployment state without pausing the model's - availability. 
- - Only applicable for Text Classification, Image Object Detection - , Tables, and Image Segmentation; all other domains manage - deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_deploy_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.DeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.deploy_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.DeployModelRequest, dict]]): - The request object. Request message for - [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. - name (:class:`str`): - Required. Resource name of the model - to deploy. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.DeployModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.deploy_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def undeploy_model(self, - request: Optional[Union[service.UndeployModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Undeploys a model. If the model is not deployed this method has - no effect. - - Only applicable for Text Classification, Image Object Detection - and Tables; all other domains manage deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_undeploy_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.UndeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.undeploy_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.UndeployModelRequest, dict]]): - The request object. Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. - name (:class:`str`): - Required. Resource name of the model - to undeploy. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.UndeployModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
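Both ``deploy_model`` above and ``undeploy_model`` resolve to ``google.protobuf.Empty`` once the long-running operation completes, so the returned futures are awaited only for completion. A hedged sketch with a placeholder model name, cycling a deployment:

.. code-block:: python

    from google.cloud import automl_v1


    async def redeploy(client: automl_v1.AutoMlAsyncClient, model_name: str) -> None:
        # Undeploying a model that is not deployed is a no-op.
        operation = await client.undeploy_model(name=model_name)
        await operation.result()  # resolves to Empty

        # Redeploy with the previously configured parameters.
        operation = await client.deploy_model(name=model_name)
        await operation.result()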
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undeploy_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def export_model(self, - request: Optional[Union[service.ExportModelRequest, dict]] = None, - *, - name: Optional[str] = None, - output_config: Optional[io.ModelExportOutputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports a trained, "export-able", model to a user specified - Google Cloud Storage location. A model is considered export-able - if and only if it has an export format defined for it in - [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_export_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - output_config = automl_v1.ModelExportOutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.ExportModelRequest( - name="name_value", - output_config=output_config, - ) - - # Make the request - operation = client.export_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.ExportModelRequest, dict]]): - The request object. Request message for - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an - error code will be returned. - name (:class:`str`): - Required. The resource name of the - model to export. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (:class:`google.cloud.automl_v1.types.ModelExportOutputConfig`): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ExportModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def get_model_evaluation(self, - request: Optional[Union[service.GetModelEvaluationRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a model evaluation. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
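As in the generated ``export_model`` sample above, the only required pieces are the model name and a ``ModelExportOutputConfig`` whose ``gcs_destination`` points at a Cloud Storage prefix; the bucket below is a placeholder, and the model must have an export format defined for it.

.. code-block:: python

    from google.cloud import automl_v1


    async def export_to_gcs(client: automl_v1.AutoMlAsyncClient, model_name: str) -> None:
        output_config = automl_v1.ModelExportOutputConfig()
        output_config.gcs_destination.output_uri_prefix = "gs://my-bucket/exports/"  # placeholder

        operation = await client.export_model(name=model_name, output_config=output_config)
        await operation.result()  # Empty once the export has been written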
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_get_model_evaluation(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.GetModelEvaluationRequest( - name="name_value", - ) - - # Make the request - response = await client.get_model_evaluation(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.GetModelEvaluationRequest, dict]]): - The request object. Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. - name (:class:`str`): - Required. Resource name for the model - evaluation. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.ModelEvaluation: - Evaluation results of a model. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.GetModelEvaluationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_evaluation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_evaluations(self, - request: Optional[Union[service.ListModelEvaluationsRequest, dict]] = None, - *, - parent: Optional[str] = None, - filter: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsAsyncPager: - r"""Lists model evaluations. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_list_model_evaluations(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.ListModelEvaluationsRequest( - parent="parent_value", - filter="filter_value", - ) - - # Make the request - page_result = client.list_model_evaluations(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.ListModelEvaluationsRequest, dict]]): - The request object. Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - parent (:class:`str`): - Required. Resource name of the model - to list the model evaluations for. If - modelId is set as "-", this will list - model evaluations from across all models - of the parent location. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - filter (:class:`str`): - Required. An expression for filtering the results of the - request. - - - ``annotation_spec_id`` - for =, != or existence. See - example below for the last. - - Some examples of using the filter are: - - - ``annotation_spec_id!=4`` --> The model evaluation - was done for annotation spec with ID different than - 4. - - ``NOT annotation_spec_id:*`` --> The model evaluation - was done for aggregate of all annotation specs. - - This corresponds to the ``filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager: - Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, filter]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ListModelEvaluationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if filter is not None: - request.filter = filter - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_evaluations, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
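The ``filter`` argument documented above is required; combining it with ``-`` as the model ID (as the ``parent`` description allows) restricts the listing to the aggregate evaluation of every model in a location. A sketch with placeholder project and location:

.. code-block:: python

    from google.cloud import automl_v1


    async def print_aggregate_evaluations(client: automl_v1.AutoMlAsyncClient) -> None:
        # "-" as the model ID lists evaluations across all models in the location.
        parent = "projects/my-project/locations/us-central1/models/-"

        pager = await client.list_model_evaluations(
            parent=parent,
            filter="NOT annotation_spec_id:*",  # aggregate evaluations only
        )
        async for evaluation in pager:
            print(evaluation.name)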
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelEvaluationsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self) -> "AutoMlAsyncClient": - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "AutoMlAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/client.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/client.py deleted file mode 100644 index e91d6dee..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/client.py +++ /dev/null @@ -1,2675 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast - -from google.cloud.automl_v1 import gapic_version as package_version - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.automl_v1.services.auto_ml import pagers -from google.cloud.automl_v1.types import annotation_spec -from google.cloud.automl_v1.types import classification -from google.cloud.automl_v1.types import dataset -from google.cloud.automl_v1.types import dataset as gca_dataset -from google.cloud.automl_v1.types import detection -from google.cloud.automl_v1.types import image -from google.cloud.automl_v1.types import io -from google.cloud.automl_v1.types import model -from google.cloud.automl_v1.types import model as gca_model -from google.cloud.automl_v1.types import model_evaluation -from google.cloud.automl_v1.types import operations -from google.cloud.automl_v1.types import service -from 
google.cloud.automl_v1.types import text -from google.cloud.automl_v1.types import text_extraction -from google.cloud.automl_v1.types import text_sentiment -from google.cloud.automl_v1.types import translation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import AutoMlGrpcTransport -from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport -from .transports.rest import AutoMlRestTransport - - -class AutoMlClientMeta(type): - """Metaclass for the AutoMl client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[AutoMlTransport]] - _transport_registry["grpc"] = AutoMlGrpcTransport - _transport_registry["grpc_asyncio"] = AutoMlGrpcAsyncIOTransport - _transport_registry["rest"] = AutoMlRestTransport - - def get_transport_class(cls, - label: Optional[str] = None, - ) -> Type[AutoMlTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class AutoMlClient(metaclass=AutoMlClientMeta): - """AutoML Server API. - - The resource names are assigned by the server. The server never - reuses names that it has created after the resources with those - names are deleted. - - An ID of a resource is the last element of the item's resource name. - For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or dash-case, either of those cases is accepted. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "automl.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. 
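The transport registry above means the synchronous client can be pinned to a specific transport by passing its label to the constructor; leaving ``transport`` unset selects the first registered entry (gRPC). A short sketch, assuming Application Default Credentials:

.. code-block:: python

    from google.cloud import automl_v1

    # Default: the first transport in the registry ("grpc").
    grpc_client = automl_v1.AutoMlClient()

    # Explicitly select the REST transport instead.
    rest_client = automl_v1.AutoMlClient(transport="rest")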
- args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoMlClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoMlClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> AutoMlTransport: - """Returns the transport used by the client instance. - - Returns: - AutoMlTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str: - """Returns a fully-qualified annotation_spec string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - - @staticmethod - def parse_annotation_spec_path(path: str) -> Dict[str,str]: - """Parses a annotation_spec path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_evaluation_path(project: str,location: str,model: str,model_evaluation: str,) -> str: - """Returns a fully-qualified model_evaluation string.""" - return "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(project=project, location=location, model=model, model_evaluation=model_evaluation, ) - - @staticmethod - def parse_model_evaluation_path(path: str) -> Dict[str,str]: - """Parses a model_evaluation path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/modelEvaluations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. 
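The path helpers above are plain static methods, so fully qualified resource names can be built and parsed without instantiating a client. The project and model ID below are placeholders; "us-central1" is currently the only supported location.

.. code-block:: python

    from google.cloud import automl_v1

    # Build a fully qualified model resource name.
    name = automl_v1.AutoMlClient.model_path("my-project", "us-central1", "TRL123")
    # -> "projects/my-project/locations/us-central1/models/TRL123"

    # And split a resource name back into its segments.
    segments = automl_v1.AutoMlClient.parse_model_path(name)
    # -> {"project": "my-project", "location": "us-central1", "model": "TRL123"}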
- - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - if client_options is None: - client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") - - # Figure out the client cert source to use. - client_cert_source = None - if use_client_cert == "true": - if client_options.client_cert_source: - client_cert_source = client_options.client_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): - api_endpoint = cls.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = cls.DEFAULT_ENDPOINT - - return api_endpoint, client_cert_source - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, AutoMlTransport]] = None, - client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the auto ml client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, AutoMlTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. 
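Per the ``client_options`` notes above, an explicit ``api_endpoint`` always takes precedence over the mTLS auto-switching driven by ``GOOGLE_API_USE_MTLS_ENDPOINT``. A sketch of overriding the endpoint; the regional hostname is illustrative and not taken from this change:

.. code-block:: python

    from google.api_core.client_options import ClientOptions
    from google.cloud import automl_v1

    # An explicit api_endpoint wins over the mTLS endpoint logic.
    options = ClientOptions(api_endpoint="eu-automl.googleapis.com")  # illustrative endpoint
    client = automl_v1.AutoMlClient(client_options=options)

    # A plain dict with the same keys is also accepted.
    client_from_dict = automl_v1.AutoMlClient(
        client_options={"api_endpoint": "eu-automl.googleapis.com"}
    )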
- """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) - - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) - - api_key_value = getattr(client_options, "api_key", None) - if api_key_value and credentials: - raise ValueError("client_options.api_key and credentials are mutually exclusive") - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, AutoMlTransport): - # transport is a AutoMlTransport instance. - if credentials or client_options.credentials_file or api_key_value: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - import google.auth._default # type: ignore - - if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): - credentials = google.auth._default.get_api_key_credentials(api_key_value) - - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - api_audience=client_options.api_audience, - ) - - def create_dataset(self, - request: Optional[Union[service.CreateDatasetRequest, dict]] = None, - *, - parent: Optional[str] = None, - dataset: Optional[gca_dataset.Dataset] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Creates a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_create_dataset(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - dataset = automl_v1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1.CreateDatasetRequest( - parent="parent_value", - dataset=dataset, - ) - - # Make the request - operation = client.create_dataset(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.CreateDatasetRequest, dict]): - The request object. Request message for - [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. - parent (str): - Required. 
The resource name of the - project to create the dataset for. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (google.cloud.automl_v1.types.Dataset): - Required. The dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.automl_v1.types.Dataset` A workspace for solving a single, particular machine learning (ML) problem. - A workspace contains examples that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.CreateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.CreateDatasetRequest): - request = service.CreateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - gca_dataset.Dataset, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def get_dataset(self, - request: Optional[Union[service.GetDatasetRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
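Mirroring the generated sample above, the synchronous ``create_dataset`` can also be driven through the flattened ``parent`` and ``dataset`` arguments and blocked on with ``operation.result()``. The names and language codes below are placeholders.

.. code-block:: python

    from google.cloud import automl_v1


    def create_translation_dataset() -> automl_v1.Dataset:
        client = automl_v1.AutoMlClient()

        dataset = automl_v1.Dataset(
            display_name="my_translation_dataset",  # placeholder
            translation_dataset_metadata=automl_v1.TranslationDatasetMetadata(
                source_language_code="en",
                target_language_code="es",
            ),
        )

        operation = client.create_dataset(
            parent="projects/my-project/locations/us-central1",
            dataset=dataset,
        )
        # Block until the long-running operation finishes.
        return operation.result(timeout=300)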
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_get_dataset(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.GetDatasetRequest( - name="name_value", - ) - - # Make the request - response = client.get_dataset(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.GetDatasetRequest, dict]): - The request object. Request message for - [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. - name (str): - Required. The resource name of the - dataset to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.GetDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.GetDatasetRequest): - request = service.GetDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_datasets(self, - request: Optional[Union[service.ListDatasetsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsPager: - r"""Lists datasets in a project. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
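# A minimal editor's sketch: get_dataset with the flattened `name` argument from the
# signature above; passing both `request` and `name` raises ValueError. The resource
# name below is a placeholder.
from google.cloud import automl_v1

def sketch_get_dataset():
    client = automl_v1.AutoMlClient()
    dataset = client.get_dataset(
        name="projects/my-project/locations/us-central1/datasets/TRL0000000000"  # placeholder
    )
    print(dataset.display_name, dataset.create_time)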
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_list_datasets(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.ListDatasetsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_datasets(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.ListDatasetsRequest, dict]): - The request object. Request message for - [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - parent (str): - Required. The resource name of the - project from which to list datasets. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsPager: - Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ListDatasetsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ListDatasetsRequest): - request = service.ListDatasetsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_datasets] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDatasetsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_dataset(self, - request: Optional[Union[service.UpdateDatasetRequest, dict]] = None, - *, - dataset: Optional[gca_dataset.Dataset] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a dataset. - - .. 
code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_update_dataset(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - dataset = automl_v1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1.UpdateDatasetRequest( - dataset=dataset, - ) - - # Make the request - response = client.update_dataset(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.UpdateDatasetRequest, dict]): - The request object. Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] - dataset (google.cloud.automl_v1.types.Dataset): - Required. The dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.UpdateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.UpdateDatasetRequest): - request = service.UpdateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_dataset(self, - request: Optional[Union[service.DeleteDatasetRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Deletes a dataset and all of its contents. Returns empty - response in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_delete_dataset(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.DeleteDatasetRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_dataset(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.DeleteDatasetRequest, dict]): - The request object. Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. - name (str): - Required. The resource name of the - dataset to delete. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.DeleteDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.DeleteDatasetRequest): - request = service.DeleteDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
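# A minimal editor's sketch: update_dataset with an explicit FieldMask so only
# display_name is changed; dataset.name must be set because it feeds the
# "dataset.name" routing header shown above. All names are placeholders.
from google.cloud import automl_v1
from google.protobuf import field_mask_pb2

def sketch_update_dataset():
    client = automl_v1.AutoMlClient()
    dataset = automl_v1.Dataset()
    dataset.name = "projects/my-project/locations/us-central1/datasets/TRL0000000000"  # placeholder
    dataset.display_name = "renamed_dataset"
    updated = client.update_dataset(
        dataset=dataset,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
    print(updated.display_name)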
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def import_data(self, - request: Optional[Union[service.ImportDataRequest, dict]] = None, - *, - name: Optional[str] = None, - input_config: Optional[io.InputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A - [schema_inference_version][google.cloud.automl.v1.InputConfig.params] - parameter must be explicitly set. Returns an empty response - in the [response][google.longrunning.Operation.response] - field when it completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_import_data(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - input_config = automl_v1.InputConfig() - input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] - - request = automl_v1.ImportDataRequest( - name="name_value", - input_config=input_config, - ) - - # Make the request - operation = client.import_data(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.ImportDataRequest, dict]): - The request object. Request message for - [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. - name (str): - Required. Dataset name. Dataset must - already exist. All imported annotations - and examples will be added. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - input_config (google.cloud.automl_v1.types.InputConfig): - Required. The desired input location - and its domain specific semantics, if - any. - - This corresponds to the ``input_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
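# A minimal editor's sketch: delete_dataset returns a google.api_core.operation.Operation
# future; the LRO result type is google.protobuf.Empty, so result() is used only to block
# and surface errors. The resource name and timeout are placeholders.
from google.cloud import automl_v1

def sketch_delete_dataset():
    client = automl_v1.AutoMlClient()
    operation = client.delete_dataset(
        name="projects/my-project/locations/us-central1/datasets/TRL0000000000"  # placeholder
    )
    operation.result(timeout=300)  # raises on failure, returns Empty on success
    print("dataset deleted")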
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, input_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ImportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ImportDataRequest): - request = service.ImportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if input_config is not None: - request.input_config = input_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.import_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def export_data(self, - request: Optional[Union[service.ExportDataRequest, dict]] = None, - *, - name: Optional[str] = None, - output_config: Optional[io.OutputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Exports dataset's data to the provided output location. Returns - an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
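# A minimal editor's sketch: import_data with a Cloud Storage source, mirroring the
# generated snippet above but using the flattened `name`/`input_config` arguments.
# The bucket URI and dataset name are placeholders; the CSV layout depends on the
# dataset's domain.
from google.cloud import automl_v1

def sketch_import_data():
    client = automl_v1.AutoMlClient()
    input_config = automl_v1.InputConfig()
    input_config.gcs_source.input_uris = ["gs://my-bucket/train.csv"]  # placeholder
    operation = client.import_data(
        name="projects/my-project/locations/us-central1/datasets/TRL0000000000",  # placeholder
        input_config=input_config,
    )
    operation.result(timeout=7200)  # imports can run for a long time; Empty on success
    print("import finished")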
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_export_data(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - output_config = automl_v1.OutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.ExportDataRequest( - name="name_value", - output_config=output_config, - ) - - # Make the request - operation = client.export_data(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.ExportDataRequest, dict]): - The request object. Request message for - [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. - name (str): - Required. The resource name of the - dataset. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.automl_v1.types.OutputConfig): - Required. The desired output - location. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ExportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ExportDataRequest): - request = service.ExportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
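# A minimal editor's sketch: export_data writes the dataset's examples under a Cloud
# Storage prefix, again as a long-running operation whose result is Empty. Paths and
# the dataset name are placeholders.
from google.cloud import automl_v1

def sketch_export_data():
    client = automl_v1.AutoMlClient()
    output_config = automl_v1.OutputConfig()
    output_config.gcs_destination.output_uri_prefix = "gs://my-bucket/exports/"  # placeholder
    operation = client.export_data(
        name="projects/my-project/locations/us-central1/datasets/TRL0000000000",  # placeholder
        output_config=output_config,
    )
    operation.result(timeout=3600)
    print("export finished")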
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def get_annotation_spec(self, - request: Optional[Union[service.GetAnnotationSpecRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an annotation spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_get_annotation_spec(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.GetAnnotationSpecRequest( - name="name_value", - ) - - # Make the request - response = client.get_annotation_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.GetAnnotationSpecRequest, dict]): - The request object. Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. - name (str): - Required. The resource name of the - annotation spec to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.AnnotationSpec: - A definition of an annotation spec. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.GetAnnotationSpecRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.GetAnnotationSpecRequest): - request = service.GetAnnotationSpecRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_model(self, - request: Optional[Union[service.CreateModelRequest, dict]] = None, - *, - parent: Optional[str] = None, - model: Optional[gca_model.Model] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Creates a model. Returns a Model in the - [response][google.longrunning.Operation.response] field when it - completes. When you create a model, several model evaluations - are created for it: a global evaluation, and one evaluation for - each annotation spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_create_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.CreateModelRequest( - parent="parent_value", - ) - - # Make the request - operation = client.create_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.CreateModelRequest, dict]): - The request object. Request message for - [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. - parent (str): - Required. Resource name of the parent - project where the model is being - created. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (google.cloud.automl_v1.types.Model): - Required. The model to create. - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.automl_v1.types.Model` API proto - representing a trained machine learning model. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.CreateModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
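# A minimal editor's sketch: create_model with the flattened parent/model arguments;
# the LRO result is the trained Model. It assumes a translation dataset for the
# metadata oneof, and every identifier below is a placeholder.
from google.cloud import automl_v1

def sketch_create_model():
    client = automl_v1.AutoMlClient()
    model = automl_v1.Model()
    model.display_name = "my_translation_model"  # placeholder
    model.dataset_id = "TRL0000000000"           # placeholder dataset id
    model.translation_model_metadata = automl_v1.TranslationModelMetadata()  # assumes translation domain
    operation = client.create_model(
        parent="projects/my-project/locations/us-central1",  # placeholder
        model=model,
    )
    created = operation.result()  # blocks until training completes
    print(created.name)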
- if not isinstance(request, service.CreateModelRequest): - request = service.CreateModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - gca_model.Model, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def get_model(self, - request: Optional[Union[service.GetModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a model. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_get_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.GetModelRequest( - name="name_value", - ) - - # Make the request - response = client.get_model(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.GetModelRequest, dict]): - The request object. Request message for - [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. - name (str): - Required. Resource name of the model. - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.Model: - API proto representing a trained - machine learning model. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.GetModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, service.GetModelRequest): - request = service.GetModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_models(self, - request: Optional[Union[service.ListModelsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsPager: - r"""Lists models. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_list_models(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.ListModelsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_models(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.ListModelsRequest, dict]): - The request object. Request message for - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - parent (str): - Required. Resource name of the - project, from which to list the models. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.services.auto_ml.pagers.ListModelsPager: - Response message for - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ListModelsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
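# A minimal editor's sketch: get_model returns the Model directly (no LRO), while
# list_models returns a pager that fetches further pages as you iterate. The parent
# resource name is a placeholder.
from google.cloud import automl_v1

def sketch_list_and_get_models():
    client = automl_v1.AutoMlClient()
    parent = "projects/my-project/locations/us-central1"  # placeholder
    for model in client.list_models(parent=parent):
        detail = client.get_model(name=model.name)
        print(detail.name, detail.display_name)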
- if not isinstance(request, service.ListModelsRequest): - request = service.ListModelsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_models] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_model(self, - request: Optional[Union[service.DeleteModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Deletes a model. Returns ``google.protobuf.Empty`` in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_delete_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.DeleteModelRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.DeleteModelRequest, dict]): - The request object. Request message for - [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. - name (str): - Required. Resource name of the model - being deleted. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.DeleteModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.DeleteModelRequest): - request = service.DeleteModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def update_model(self, - request: Optional[Union[service.UpdateModelRequest, dict]] = None, - *, - model: Optional[gca_model.Model] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model.Model: - r"""Updates a model. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_update_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.UpdateModelRequest( - ) - - # Make the request - response = client.update_model(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.UpdateModelRequest, dict]): - The request object. Request message for - [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] - model (google.cloud.automl_v1.types.Model): - Required. The model which replaces - the resource on the server. - - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
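# A minimal editor's sketch: update_model mirrors update_dataset above; model.name
# must identify the model being updated, and the FieldMask limits the update to
# display_name. Names are placeholders.
from google.cloud import automl_v1
from google.protobuf import field_mask_pb2

def sketch_update_model():
    client = automl_v1.AutoMlClient()
    model = automl_v1.Model()
    model.name = "projects/my-project/locations/us-central1/models/TRL0000000000"  # placeholder
    model.display_name = "renamed_model"
    updated = client.update_model(
        model=model,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
    print(updated.display_name)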
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.Model: - API proto representing a trained - machine learning model. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.UpdateModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.UpdateModelRequest): - request = service.UpdateModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model is not None: - request.model = model - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model.name", request.model.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def deploy_model(self, - request: Optional[Union[service.DeployModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Deploys a model. If a model is already deployed, deploying it - with the same parameters has no effect. Deploying with different - parameters (e.g., changing - [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number]) - will reset the deployment state without pausing the model's - availability. - - Only applicable for Text Classification, Image Object Detection, - Tables, and Image Segmentation; all other domains manage - deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization.
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_deploy_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.DeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.deploy_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.DeployModelRequest, dict]): - The request object. Request message for - [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. - name (str): - Required. Resource name of the model - to deploy. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.DeployModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.DeployModelRequest): - request = service.DeployModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.deploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. 
- return response - - def undeploy_model(self, - request: Optional[Union[service.UndeployModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Undeploys a model. If the model is not deployed this method has - no effect. - - Only applicable for Text Classification, Image Object Detection - and Tables; all other domains manage deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_undeploy_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.UndeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.undeploy_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.UndeployModelRequest, dict]): - The request object. Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. - name (str): - Required. Resource name of the model - to undeploy. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.UndeployModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.UndeployModelRequest): - request = service.UndeployModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
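# A minimal editor's sketch: deploy_model and undeploy_model are both LROs whose
# result is Empty; as the docstrings above note, only some domains (e.g. Image
# Object Detection) require explicit deployment. The model name is a placeholder.
from google.cloud import automl_v1

def sketch_deploy_then_undeploy():
    client = automl_v1.AutoMlClient()
    model_name = "projects/my-project/locations/us-central1/models/IOD0000000000"  # placeholder
    client.deploy_model(name=model_name).result()    # blocks until the model is deployed
    # ... run predictions against the deployed model here ...
    client.undeploy_model(name=model_name).result()  # blocks until undeployed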
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.undeploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def export_model(self, - request: Optional[Union[service.ExportModelRequest, dict]] = None, - *, - name: Optional[str] = None, - output_config: Optional[io.ModelExportOutputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Exports a trained, "export-able", model to a user specified - Google Cloud Storage location. A model is considered export-able - if and only if it has an export format defined for it in - [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_export_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - output_config = automl_v1.ModelExportOutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.ExportModelRequest( - name="name_value", - output_config=output_config, - ) - - # Make the request - operation = client.export_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.ExportModelRequest, dict]): - The request object. Request message for - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an - error code will be returned. - name (str): - Required. The resource name of the - model to export. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.automl_v1.types.ModelExportOutputConfig): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
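# A minimal editor's sketch: export_model for an export-able model, writing to a
# Cloud Storage prefix via ModelExportOutputConfig as in the generated snippet above.
# The model name and bucket path are placeholders.
from google.cloud import automl_v1

def sketch_export_model():
    client = automl_v1.AutoMlClient()
    output_config = automl_v1.ModelExportOutputConfig()
    output_config.gcs_destination.output_uri_prefix = "gs://my-bucket/model-export/"  # placeholder
    operation = client.export_model(
        name="projects/my-project/locations/us-central1/models/IOD0000000000",  # placeholder
        output_config=output_config,
    )
    operation.result(timeout=1800)
    print("model exported")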
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ExportModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ExportModelRequest): - request = service.ExportModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def get_model_evaluation(self, - request: Optional[Union[service.GetModelEvaluationRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a model evaluation. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_get_model_evaluation(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.GetModelEvaluationRequest( - name="name_value", - ) - - # Make the request - response = client.get_model_evaluation(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.GetModelEvaluationRequest, dict]): - The request object. 
Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. - name (str): - Required. Resource name for the model - evaluation. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.ModelEvaluation: - Evaluation results of a model. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.GetModelEvaluationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.GetModelEvaluationRequest): - request = service.GetModelEvaluationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_evaluations(self, - request: Optional[Union[service.ListModelEvaluationsRequest, dict]] = None, - *, - parent: Optional[str] = None, - filter: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsPager: - r"""Lists model evaluations. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_list_model_evaluations(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.ListModelEvaluationsRequest( - parent="parent_value", - filter="filter_value", - ) - - # Make the request - page_result = client.list_model_evaluations(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.ListModelEvaluationsRequest, dict]): - The request object. Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. 
- parent (str): - Required. Resource name of the model - to list the model evaluations for. If - modelId is set as "-", this will list - model evaluations from across all models - of the parent location. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - filter (str): - Required. An expression for filtering the results of the - request. - - - ``annotation_spec_id`` - for =, != or existence. See - example below for the last. - - Some examples of using the filter are: - - - ``annotation_spec_id!=4`` --> The model evaluation - was done for annotation spec with ID different than - 4. - - ``NOT annotation_spec_id:*`` --> The model evaluation - was done for aggregate of all annotation specs. - - This corresponds to the ``filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsPager: - Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, filter]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ListModelEvaluationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ListModelEvaluationsRequest): - request = service.ListModelEvaluationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if filter is not None: - request.filter = filter - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelEvaluationsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self) -> "AutoMlClient": - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
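When the transport is not shared, the warning above is easiest to satisfy by scoping the client to a ``with`` block. A minimal sketch, with a placeholder evaluation name:

.. code-block:: python

    from google.cloud import automl_v1

    # The transport is closed automatically when the block exits.
    with automl_v1.AutoMlClient() as client:
        evaluation = client.get_model_evaluation(
            name="projects/PROJECT_ID/locations/us-central1/models/MODEL_ID/modelEvaluations/EVAL_ID",
        )
        print(evaluation)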
- """ - self.transport.close() - - - - - - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "AutoMlClient", -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/pagers.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/pagers.py deleted file mode 100644 index 017e6bd6..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/pagers.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.automl_v1.types import dataset -from google.cloud.automl_v1.types import model -from google.cloud.automl_v1.types import model_evaluation -from google.cloud.automl_v1.types import service - - -class ListDatasetsPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1.types.ListDatasetsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., service.ListDatasetsResponse], - request: service.ListDatasetsRequest, - response: service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1.types.ListDatasetsRequest): - The initial request object. - response (google.cloud.automl_v1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[dataset.Dataset]: - for page in self.pages: - yield from page.datasets - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDatasetsAsyncPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1.types.ListDatasetsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[service.ListDatasetsResponse]], - request: service.ListDatasetsRequest, - response: service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1.types.ListDatasetsRequest): - The initial request object. - response (google.cloud.automl_v1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[dataset.Dataset]: - async def async_generator(): - async for page in self.pages: - for response in page.datasets: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelsPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1.types.ListModelsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``model`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1.types.ListModelsResponse` - attributes are available on the pager. 
If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., service.ListModelsResponse], - request: service.ListModelsRequest, - response: service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1.types.ListModelsRequest): - The initial request object. - response (google.cloud.automl_v1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model.Model]: - for page in self.pages: - yield from page.model - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelsAsyncPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1.types.ListModelsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``model`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[service.ListModelsResponse]], - request: service.ListModelsRequest, - response: service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1.types.ListModelsRequest): - The initial request object. - response (google.cloud.automl_v1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
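The async variant is obtained from ``AutoMlAsyncClient.list_models`` and consumed with ``async for``. A minimal sketch, with a placeholder parent:

.. code-block:: python

    import asyncio

    from google.cloud import automl_v1

    async def main():
        client = automl_v1.AutoMlAsyncClient()
        pager = await client.list_models(
            parent="projects/PROJECT_ID/locations/us-central1",
        )
        # ``__aiter__`` resolves additional pages transparently.
        async for model in pager:
            print(model.name)

    asyncio.run(main())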
- """ - self._method = method - self._request = service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[model.Model]: - async def async_generator(): - async for page in self.pages: - for response in page.model: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_evaluation`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluation`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., service.ListModelEvaluationsResponse], - request: service.ListModelEvaluationsRequest, - response: service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.automl_v1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model_evaluation.ModelEvaluation]: - for page in self.pages: - yield from page.model_evaluation - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsAsyncPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_evaluation`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluation`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[service.ListModelEvaluationsResponse]], - request: service.ListModelEvaluationsRequest, - response: service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.automl_v1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[model_evaluation.ModelEvaluation]: - async def async_generator(): - async for page in self.pages: - for response in page.model_evaluation: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/__init__.py deleted file mode 100644 index 9d86479d..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import AutoMlTransport -from .grpc import AutoMlGrpcTransport -from .grpc_asyncio import AutoMlGrpcAsyncIOTransport -from .rest import AutoMlRestTransport -from .rest import AutoMlRestInterceptor - - -# Compile a registry of transports. 
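The registry assembled just below maps the strings ``'grpc'``, ``'grpc_asyncio'`` and ``'rest'`` to transport classes, and the same strings can be passed as the ``transport`` argument of the client. A minimal sketch:

.. code-block:: python

    from google.cloud import automl_v1

    # The same keys registered below select the transport implementation.
    grpc_client = automl_v1.AutoMlClient(transport="grpc")
    rest_client = automl_v1.AutoMlClient(transport="rest")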
-_transport_registry = OrderedDict() # type: Dict[str, Type[AutoMlTransport]] -_transport_registry['grpc'] = AutoMlGrpcTransport -_transport_registry['grpc_asyncio'] = AutoMlGrpcAsyncIOTransport -_transport_registry['rest'] = AutoMlRestTransport - -__all__ = ( - 'AutoMlTransport', - 'AutoMlGrpcTransport', - 'AutoMlGrpcAsyncIOTransport', - 'AutoMlRestTransport', - 'AutoMlRestInterceptor', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/base.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/base.py deleted file mode 100644 index 8c7d2bf9..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/base.py +++ /dev/null @@ -1,462 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union - -from google.cloud.automl_v1 import gapic_version as package_version - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.automl_v1.types import annotation_spec -from google.cloud.automl_v1.types import dataset -from google.cloud.automl_v1.types import dataset as gca_dataset -from google.cloud.automl_v1.types import model -from google.cloud.automl_v1.types import model as gca_model -from google.cloud.automl_v1.types import model_evaluation -from google.cloud.automl_v1.types import service -from google.longrunning import operations_pb2 # type: ignore - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -class AutoMlTransport(abc.ABC): - """Abstract transport class for AutoMl.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'automl.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - # Don't apply audience if the credentials file passed from user. - if hasattr(credentials, "with_gdch_audience"): - credentials = credentials.with_gdch_audience(api_audience if api_audience else host) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
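The wrapped methods built below give each RPC its default retry and timeout (for example, ``get_dataset`` retries ``DeadlineExceeded`` and ``ServiceUnavailable`` with exponential backoff and a 5 second deadline); both can be overridden per call. A minimal sketch, with a placeholder dataset name:

.. code-block:: python

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.cloud import automl_v1

    client = automl_v1.AutoMlClient()

    # Override the wrapped defaults for a single call.
    dataset = client.get_dataset(
        name="projects/PROJECT_ID/locations/us-central1/datasets/DATASET_ID",
        retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                core_exceptions.DeadlineExceeded,
                core_exceptions.ServiceUnavailable,
            ),
        ),
        timeout=30.0,
    )
    print(dataset.name)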
- self._wrapped_methods = { - self.create_dataset: gapic_v1.method.wrap_method( - self.create_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.get_dataset: gapic_v1.method.wrap_method( - self.get_dataset, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.list_datasets: gapic_v1.method.wrap_method( - self.list_datasets, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.update_dataset: gapic_v1.method.wrap_method( - self.update_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_dataset: gapic_v1.method.wrap_method( - self.delete_dataset, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.import_data: gapic_v1.method.wrap_method( - self.import_data, - default_timeout=5.0, - client_info=client_info, - ), - self.export_data: gapic_v1.method.wrap_method( - self.export_data, - default_timeout=5.0, - client_info=client_info, - ), - self.get_annotation_spec: gapic_v1.method.wrap_method( - self.get_annotation_spec, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.create_model: gapic_v1.method.wrap_method( - self.create_model, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model: gapic_v1.method.wrap_method( - self.get_model, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.list_models: gapic_v1.method.wrap_method( - self.list_models, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.delete_model: gapic_v1.method.wrap_method( - self.delete_model, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.update_model: gapic_v1.method.wrap_method( - self.update_model, - default_timeout=5.0, - client_info=client_info, - ), - self.deploy_model: gapic_v1.method.wrap_method( - self.deploy_model, - default_timeout=5.0, - client_info=client_info, - ), - self.undeploy_model: gapic_v1.method.wrap_method( - self.undeploy_model, - default_timeout=5.0, - client_info=client_info, - ), - self.export_model: gapic_v1.method.wrap_method( - self.export_model, - default_timeout=5.0, - client_info=client_info, - ), - 
self.get_model_evaluation: gapic_v1.method.wrap_method( - self.get_model_evaluation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.list_model_evaluations: gapic_v1.method.wrap_method( - self.list_model_evaluations, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_dataset(self) -> Callable[ - [service.CreateDatasetRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_dataset(self) -> Callable[ - [service.GetDatasetRequest], - Union[ - dataset.Dataset, - Awaitable[dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def list_datasets(self) -> Callable[ - [service.ListDatasetsRequest], - Union[ - service.ListDatasetsResponse, - Awaitable[service.ListDatasetsResponse] - ]]: - raise NotImplementedError() - - @property - def update_dataset(self) -> Callable[ - [service.UpdateDatasetRequest], - Union[ - gca_dataset.Dataset, - Awaitable[gca_dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def delete_dataset(self) -> Callable[ - [service.DeleteDatasetRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def import_data(self) -> Callable[ - [service.ImportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_data(self) -> Callable[ - [service.ExportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_annotation_spec(self) -> Callable[ - [service.GetAnnotationSpecRequest], - Union[ - annotation_spec.AnnotationSpec, - Awaitable[annotation_spec.AnnotationSpec] - ]]: - raise NotImplementedError() - - @property - def create_model(self) -> Callable[ - [service.CreateModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model(self) -> Callable[ - [service.GetModelRequest], - Union[ - model.Model, - Awaitable[model.Model] - ]]: - raise NotImplementedError() - - @property - def list_models(self) -> Callable[ - [service.ListModelsRequest], - Union[ - service.ListModelsResponse, - Awaitable[service.ListModelsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_model(self) -> Callable[ - [service.DeleteModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def update_model(self) -> Callable[ - [service.UpdateModelRequest], - Union[ - gca_model.Model, - 
Awaitable[gca_model.Model] - ]]: - raise NotImplementedError() - - @property - def deploy_model(self) -> Callable[ - [service.DeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def undeploy_model(self) -> Callable[ - [service.UndeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_model(self) -> Callable[ - [service.ExportModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model_evaluation(self) -> Callable[ - [service.GetModelEvaluationRequest], - Union[ - model_evaluation.ModelEvaluation, - Awaitable[model_evaluation.ModelEvaluation] - ]]: - raise NotImplementedError() - - @property - def list_model_evaluations(self) -> Callable[ - [service.ListModelEvaluationsRequest], - Union[ - service.ListModelEvaluationsResponse, - Awaitable[service.ListModelEvaluationsResponse] - ]]: - raise NotImplementedError() - - @property - def kind(self) -> str: - raise NotImplementedError() - - -__all__ = ( - 'AutoMlTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc.py deleted file mode 100644 index c77a051e..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc.py +++ /dev/null @@ -1,796 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.automl_v1.types import annotation_spec -from google.cloud.automl_v1.types import dataset -from google.cloud.automl_v1.types import dataset as gca_dataset -from google.cloud.automl_v1.types import model -from google.cloud.automl_v1.types import model as gca_model -from google.cloud.automl_v1.types import model_evaluation -from google.cloud.automl_v1.types import service -from google.longrunning import operations_pb2 # type: ignore -from .base import AutoMlTransport, DEFAULT_CLIENT_INFO - - -class AutoMlGrpcTransport(AutoMlTransport): - """gRPC backend transport for AutoMl. - - AutoML Server API. - - The resource names are assigned by the server. The server never - reuses names that it has created after the resources with those - names are deleted. - - An ID of a resource is the last element of the item's resource name. 
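Resource names of this shape are normally assembled with the path helper class methods generated on the client rather than formatted by hand; the sketch below assumes the standard ``dataset_path`` and ``model_path`` helpers and uses placeholder IDs:

.. code-block:: python

    from google.cloud import automl_v1

    # Produces "projects/my-project/locations/us-central1/datasets/TBL123".
    dataset_name = automl_v1.AutoMlClient.dataset_path(
        "my-project", "us-central1", "TBL123"
    )
    model_name = automl_v1.AutoMlClient.model_path(
        "my-project", "us-central1", "MDL456"
    )
    print(dataset_name, model_name)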
- For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or dash-case, either of those cases is accepted. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. 
These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [service.CreateDatasetRequest], - operations_pb2.Operation]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a dataset. - - Returns: - Callable[[~.CreateDatasetRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/CreateDataset', - request_serializer=service.CreateDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [service.GetDatasetRequest], - dataset.Dataset]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a dataset. - - Returns: - Callable[[~.GetDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/GetDataset', - request_serializer=service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def list_datasets(self) -> Callable[ - [service.ListDatasetsRequest], - service.ListDatasetsResponse]: - r"""Return a callable for the list datasets method over gRPC. - - Lists datasets in a project. - - Returns: - Callable[[~.ListDatasetsRequest], - ~.ListDatasetsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ListDatasets', - request_serializer=service.ListDatasetsRequest.serialize, - response_deserializer=service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def update_dataset(self) -> Callable[ - [service.UpdateDatasetRequest], - gca_dataset.Dataset]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/UpdateDataset', - request_serializer=service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def delete_dataset(self) -> Callable[ - [service.DeleteDatasetRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a dataset and all of its contents. Returns empty - response in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - Returns: - Callable[[~.DeleteDatasetRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/DeleteDataset', - request_serializer=service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [service.ImportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A - [schema_inference_version][google.cloud.automl.v1.InputConfig.params] - parameter must be explicitly set. Returns an empty response - in the [response][google.longrunning.Operation.response] - field when it completes. - - Returns: - Callable[[~.ImportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
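At the client level, ``ImportData`` is driven by an ``ImportDataRequest`` whose ``input_config`` points at the source data; the sketch below assumes the ``GcsSource``/``input_uris`` fields from ``automl_v1.types.io`` and uses placeholder names:

.. code-block:: python

    from google.cloud import automl_v1

    client = automl_v1.AutoMlClient()

    # Point the import at one or more CSV files in Cloud Storage.
    input_config = automl_v1.InputConfig()
    input_config.gcs_source.input_uris = ["gs://BUCKET/path/to/data.csv"]

    request = automl_v1.ImportDataRequest(
        name="projects/PROJECT_ID/locations/us-central1/datasets/DATASET_ID",
        input_config=input_config,
    )

    # Import runs as a long-running operation.
    operation = client.import_data(request=request)
    operation.result()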
- if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ImportData', - request_serializer=service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [service.ExportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the export data method over gRPC. - - Exports dataset's data to the provided output location. Returns - an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.ExportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ExportData', - request_serializer=service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def get_annotation_spec(self) -> Callable[ - [service.GetAnnotationSpecRequest], - annotation_spec.AnnotationSpec]: - r"""Return a callable for the get annotation spec method over gRPC. - - Gets an annotation spec. - - Returns: - Callable[[~.GetAnnotationSpecRequest], - ~.AnnotationSpec]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_annotation_spec' not in self._stubs: - self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/GetAnnotationSpec', - request_serializer=service.GetAnnotationSpecRequest.serialize, - response_deserializer=annotation_spec.AnnotationSpec.deserialize, - ) - return self._stubs['get_annotation_spec'] - - @property - def create_model(self) -> Callable[ - [service.CreateModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the create model method over gRPC. - - Creates a model. Returns a Model in the - [response][google.longrunning.Operation.response] field when it - completes. When you create a model, several model evaluations - are created for it: a global evaluation, and one evaluation for - each annotation spec. - - Returns: - Callable[[~.CreateModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_model' not in self._stubs: - self._stubs['create_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/CreateModel', - request_serializer=service.CreateModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_model'] - - @property - def get_model(self) -> Callable[ - [service.GetModelRequest], - model.Model]: - r"""Return a callable for the get model method over gRPC. - - Gets a model. 
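A minimal sketch of fetching a model, assuming the ``display_name`` and ``deployment_state`` fields on ``Model`` and using a placeholder resource name:

.. code-block:: python

    from google.cloud import automl_v1

    client = automl_v1.AutoMlClient()

    model = client.get_model(
        name="projects/PROJECT_ID/locations/us-central1/models/MODEL_ID",
    )
    print(model.display_name, model.deployment_state)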
- - Returns: - Callable[[~.GetModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/GetModel', - request_serializer=service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [service.ListModelsRequest], - service.ListModelsResponse]: - r"""Return a callable for the list models method over gRPC. - - Lists models. - - Returns: - Callable[[~.ListModelsRequest], - ~.ListModelsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ListModels', - request_serializer=service.ListModelsRequest.serialize, - response_deserializer=service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def delete_model(self) -> Callable[ - [service.DeleteModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete model method over gRPC. - - Deletes a model. Returns ``google.protobuf.Empty`` in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - Returns: - Callable[[~.DeleteModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model' not in self._stubs: - self._stubs['delete_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/DeleteModel', - request_serializer=service.DeleteModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model'] - - @property - def update_model(self) -> Callable[ - [service.UpdateModelRequest], - gca_model.Model]: - r"""Return a callable for the update model method over gRPC. - - Updates a model. - - Returns: - Callable[[~.UpdateModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_model' not in self._stubs: - self._stubs['update_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/UpdateModel', - request_serializer=service.UpdateModelRequest.serialize, - response_deserializer=gca_model.Model.deserialize, - ) - return self._stubs['update_model'] - - @property - def deploy_model(self) -> Callable[ - [service.DeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a model. 
If a model is already deployed, deploying it - with the same parameters has no effect. Deploying with different - parameters (e.g., changing - [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number]) - will reset the deployment state without pausing the model's - availability. - - Only applicable for Text Classification, Image Object Detection, - Tables, and Image Segmentation; all other domains manage - deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.DeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/DeployModel', - request_serializer=service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [service.UndeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a model. If the model is not deployed this method has - no effect. - - Only applicable for Text Classification, Image Object Detection - and Tables; all other domains manage deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.UndeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/UndeployModel', - request_serializer=service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - @property - def export_model(self) -> Callable[ - [service.ExportModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the export model method over gRPC. - - Exports a trained, "export-able", model to a user specified - Google Cloud Storage location. A model is considered export-able - if and only if it has an export format defined for it in - [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.ExportModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
- if 'export_model' not in self._stubs: - self._stubs['export_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ExportModel', - request_serializer=service.ExportModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_model'] - - @property - def get_model_evaluation(self) -> Callable[ - [service.GetModelEvaluationRequest], - model_evaluation.ModelEvaluation]: - r"""Return a callable for the get model evaluation method over gRPC. - - Gets a model evaluation. - - Returns: - Callable[[~.GetModelEvaluationRequest], - ~.ModelEvaluation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/GetModelEvaluation', - request_serializer=service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [service.ListModelEvaluationsRequest], - service.ListModelEvaluationsResponse]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists model evaluations. - - Returns: - Callable[[~.ListModelEvaluationsRequest], - ~.ListModelEvaluationsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ListModelEvaluations', - request_serializer=service.ListModelEvaluationsRequest.serialize, - response_deserializer=service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - def close(self): - self.grpc_channel.close() - - @property - def kind(self) -> str: - return "grpc" - - -__all__ = ( - 'AutoMlGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py deleted file mode 100644 index 0d68fd82..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py +++ /dev/null @@ -1,795 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.automl_v1.types import annotation_spec -from google.cloud.automl_v1.types import dataset -from google.cloud.automl_v1.types import dataset as gca_dataset -from google.cloud.automl_v1.types import model -from google.cloud.automl_v1.types import model as gca_model -from google.cloud.automl_v1.types import model_evaluation -from google.cloud.automl_v1.types import service -from google.longrunning import operations_pb2 # type: ignore -from .base import AutoMlTransport, DEFAULT_CLIENT_INFO -from .grpc import AutoMlGrpcTransport - - -class AutoMlGrpcAsyncIOTransport(AutoMlTransport): - """gRPC AsyncIO backend transport for AutoMl. - - AutoML Server API. - - The resource names are assigned by the server. The server never - reuses names that it has created after the resources with those - names are deleted. - - An ID of a resource is the last element of the item's resource name. - For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or dash-case, either of those cases is accepted. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [service.CreateDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a dataset. - - Returns: - Callable[[~.CreateDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/CreateDataset', - request_serializer=service.CreateDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [service.GetDatasetRequest], - Awaitable[dataset.Dataset]]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a dataset. - - Returns: - Callable[[~.GetDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/GetDataset', - request_serializer=service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def list_datasets(self) -> Callable[ - [service.ListDatasetsRequest], - Awaitable[service.ListDatasetsResponse]]: - r"""Return a callable for the list datasets method over gRPC. - - Lists datasets in a project. - - Returns: - Callable[[~.ListDatasetsRequest], - Awaitable[~.ListDatasetsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ListDatasets', - request_serializer=service.ListDatasetsRequest.serialize, - response_deserializer=service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def update_dataset(self) -> Callable[ - [service.UpdateDatasetRequest], - Awaitable[gca_dataset.Dataset]]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/UpdateDataset', - request_serializer=service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def delete_dataset(self) -> Callable[ - [service.DeleteDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a dataset and all of its contents. Returns empty - response in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - Returns: - Callable[[~.DeleteDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/DeleteDataset', - request_serializer=service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [service.ImportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A - [schema_inference_version][google.cloud.automl.v1.InputConfig.params] - parameter must be explicitly set. Returns an empty response - in the [response][google.longrunning.Operation.response] - field when it completes. - - Returns: - Callable[[~.ImportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ImportData', - request_serializer=service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [service.ExportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export data method over gRPC. - - Exports dataset's data to the provided output location. Returns - an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.ExportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ExportData', - request_serializer=service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def get_annotation_spec(self) -> Callable[ - [service.GetAnnotationSpecRequest], - Awaitable[annotation_spec.AnnotationSpec]]: - r"""Return a callable for the get annotation spec method over gRPC. - - Gets an annotation spec. - - Returns: - Callable[[~.GetAnnotationSpecRequest], - Awaitable[~.AnnotationSpec]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_annotation_spec' not in self._stubs: - self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/GetAnnotationSpec', - request_serializer=service.GetAnnotationSpecRequest.serialize, - response_deserializer=annotation_spec.AnnotationSpec.deserialize, - ) - return self._stubs['get_annotation_spec'] - - @property - def create_model(self) -> Callable[ - [service.CreateModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create model method over gRPC. - - Creates a model. Returns a Model in the - [response][google.longrunning.Operation.response] field when it - completes. When you create a model, several model evaluations - are created for it: a global evaluation, and one evaluation for - each annotation spec. - - Returns: - Callable[[~.CreateModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_model' not in self._stubs: - self._stubs['create_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/CreateModel', - request_serializer=service.CreateModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_model'] - - @property - def get_model(self) -> Callable[ - [service.GetModelRequest], - Awaitable[model.Model]]: - r"""Return a callable for the get model method over gRPC. - - Gets a model. - - Returns: - Callable[[~.GetModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/GetModel', - request_serializer=service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [service.ListModelsRequest], - Awaitable[service.ListModelsResponse]]: - r"""Return a callable for the list models method over gRPC. - - Lists models. - - Returns: - Callable[[~.ListModelsRequest], - Awaitable[~.ListModelsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ListModels', - request_serializer=service.ListModelsRequest.serialize, - response_deserializer=service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def delete_model(self) -> Callable[ - [service.DeleteModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete model method over gRPC. - - Deletes a model. 
Returns ``google.protobuf.Empty`` in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - Returns: - Callable[[~.DeleteModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model' not in self._stubs: - self._stubs['delete_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/DeleteModel', - request_serializer=service.DeleteModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model'] - - @property - def update_model(self) -> Callable[ - [service.UpdateModelRequest], - Awaitable[gca_model.Model]]: - r"""Return a callable for the update model method over gRPC. - - Updates a model. - - Returns: - Callable[[~.UpdateModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_model' not in self._stubs: - self._stubs['update_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/UpdateModel', - request_serializer=service.UpdateModelRequest.serialize, - response_deserializer=gca_model.Model.deserialize, - ) - return self._stubs['update_model'] - - @property - def deploy_model(self) -> Callable[ - [service.DeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a model. If a model is already deployed, deploying it - with the same parameters has no effect. Deploying with different - parameters (e.g., changing - [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number]) - will reset the deployment state without pausing the model's - availability. - - Only applicable for Text Classification, Image Object Detection, - Tables, and Image Segmentation; all other domains manage - deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.DeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/DeployModel', - request_serializer=service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [service.UndeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a model. If the model is not deployed this method has - no effect.
- - Only applicable for Text Classification, Image Object Detection - and Tables; all other domains manage deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.UndeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/UndeployModel', - request_serializer=service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - @property - def export_model(self) -> Callable[ - [service.ExportModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export model method over gRPC. - - Exports a trained, "export-able", model to a user specified - Google Cloud Storage location. A model is considered export-able - if and only if it has an export format defined for it in - [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.ExportModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_model' not in self._stubs: - self._stubs['export_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ExportModel', - request_serializer=service.ExportModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_model'] - - @property - def get_model_evaluation(self) -> Callable[ - [service.GetModelEvaluationRequest], - Awaitable[model_evaluation.ModelEvaluation]]: - r"""Return a callable for the get model evaluation method over gRPC. - - Gets a model evaluation. - - Returns: - Callable[[~.GetModelEvaluationRequest], - Awaitable[~.ModelEvaluation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/GetModelEvaluation', - request_serializer=service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [service.ListModelEvaluationsRequest], - Awaitable[service.ListModelEvaluationsResponse]]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists model evaluations. 
- - Returns: - Callable[[~.ListModelEvaluationsRequest], - Awaitable[~.ListModelEvaluationsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.AutoMl/ListModelEvaluations', - request_serializer=service.ListModelEvaluationsRequest.serialize, - response_deserializer=service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'AutoMlGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/rest.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/rest.py deleted file mode 100644 index 017da644..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/auto_ml/transports/rest.py +++ /dev/null @@ -1,2366 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.api_core import exceptions as core_exceptions -from google.api_core import retry as retries -from google.api_core import rest_helpers -from google.api_core import rest_streaming -from google.api_core import path_template -from google.api_core import gapic_v1 - -from google.protobuf import json_format -from google.api_core import operations_v1 -from requests import __version__ as requests_version -import dataclasses -import re -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -import warnings - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - - -from google.cloud.automl_v1.types import annotation_spec -from google.cloud.automl_v1.types import dataset -from google.cloud.automl_v1.types import dataset as gca_dataset -from google.cloud.automl_v1.types import model -from google.cloud.automl_v1.types import model as gca_model -from google.cloud.automl_v1.types import model_evaluation -from google.cloud.automl_v1.types import service -from google.longrunning import operations_pb2 # type: ignore - -from .base import AutoMlTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, - grpc_version=None, - rest_version=requests_version, -) - - -class AutoMlRestInterceptor: - """Interceptor for AutoMl. - - Interceptors are used to manipulate requests, request metadata, and responses - in arbitrary ways. - Example use cases include: - * Logging - * Verifying requests according to service or custom semantics - * Stripping extraneous information from responses - - These use cases and more can be enabled by injecting an - instance of a custom subclass when constructing the AutoMlRestTransport. - - .. 
code-block:: python - class MyCustomAutoMlInterceptor(AutoMlRestInterceptor): - def pre_create_dataset(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_dataset(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_delete_dataset(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_delete_dataset(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_delete_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_delete_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_deploy_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_deploy_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_export_data(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_export_data(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_export_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_export_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_annotation_spec(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_annotation_spec(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_dataset(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_dataset(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_model_evaluation(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_model_evaluation(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_import_data(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_import_data(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_datasets(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_datasets(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_model_evaluations(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_model_evaluations(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_models(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_models(self, response): - 
logging.log(f"Received response: {response}") - return response - - def pre_undeploy_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_undeploy_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_dataset(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_dataset(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_model(self, response): - logging.log(f"Received response: {response}") - return response - - transport = AutoMlRestTransport(interceptor=MyCustomAutoMlInterceptor()) - client = AutoMlClient(transport=transport) - - - """ - def pre_create_dataset(self, request: service.CreateDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.CreateDatasetRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for create_dataset - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_create_dataset(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for create_dataset - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_create_model(self, request: service.CreateModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.CreateModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for create_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_create_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for create_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_delete_dataset(self, request: service.DeleteDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeleteDatasetRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for delete_dataset - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_delete_dataset(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for delete_dataset - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_delete_model(self, request: service.DeleteModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeleteModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for delete_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_delete_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for delete_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. 
- """ - return response - def pre_deploy_model(self, request: service.DeployModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeployModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for deploy_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_deploy_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for deploy_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_export_data(self, request: service.ExportDataRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ExportDataRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for export_data - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_export_data(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for export_data - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_export_model(self, request: service.ExportModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ExportModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for export_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_export_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for export_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_get_annotation_spec(self, request: service.GetAnnotationSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetAnnotationSpecRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_annotation_spec - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_get_annotation_spec(self, response: annotation_spec.AnnotationSpec) -> annotation_spec.AnnotationSpec: - """Post-rpc interceptor for get_annotation_spec - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_get_dataset(self, request: service.GetDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetDatasetRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_dataset - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_get_dataset(self, response: dataset.Dataset) -> dataset.Dataset: - """Post-rpc interceptor for get_dataset - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. 
- """ - return response - def pre_get_model(self, request: service.GetModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_get_model(self, response: model.Model) -> model.Model: - """Post-rpc interceptor for get_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_get_model_evaluation(self, request: service.GetModelEvaluationRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetModelEvaluationRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_model_evaluation - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_get_model_evaluation(self, response: model_evaluation.ModelEvaluation) -> model_evaluation.ModelEvaluation: - """Post-rpc interceptor for get_model_evaluation - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_import_data(self, request: service.ImportDataRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ImportDataRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for import_data - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_import_data(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for import_data - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_list_datasets(self, request: service.ListDatasetsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListDatasetsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_datasets - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_list_datasets(self, response: service.ListDatasetsResponse) -> service.ListDatasetsResponse: - """Post-rpc interceptor for list_datasets - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_list_model_evaluations(self, request: service.ListModelEvaluationsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListModelEvaluationsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_model_evaluations - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_list_model_evaluations(self, response: service.ListModelEvaluationsResponse) -> service.ListModelEvaluationsResponse: - """Post-rpc interceptor for list_model_evaluations - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. 
- """ - return response - def pre_list_models(self, request: service.ListModelsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListModelsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_models - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_list_models(self, response: service.ListModelsResponse) -> service.ListModelsResponse: - """Post-rpc interceptor for list_models - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_undeploy_model(self, request: service.UndeployModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UndeployModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for undeploy_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_undeploy_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for undeploy_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_update_dataset(self, request: service.UpdateDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UpdateDatasetRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for update_dataset - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_update_dataset(self, response: gca_dataset.Dataset) -> gca_dataset.Dataset: - """Post-rpc interceptor for update_dataset - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_update_model(self, request: service.UpdateModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UpdateModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for update_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_update_model(self, response: gca_model.Model) -> gca_model.Model: - """Post-rpc interceptor for update_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - - -@dataclasses.dataclass -class AutoMlRestStub: - _session: AuthorizedSession - _host: str - _interceptor: AutoMlRestInterceptor - - -class AutoMlRestTransport(AutoMlTransport): - """REST backend transport for AutoMl. - - AutoML Server API. - - The resource names are assigned by the server. The server never - reuses names that it has created after the resources with those - names are deleted. - - An ID of a resource is the last element of the item's resource name. - For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or dash-case, either of those cases is accepted. 
- - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends JSON representations of protocol buffers over HTTP/1.1 - - """ - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[ - ], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = 'https', - interceptor: Optional[AutoMlRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. - # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) - if maybe_url_match is None: - raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST) - self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or AutoMlRestInterceptor() - self._prep_wrapped_messages(client_info) - - @property - def operations_client(self) -> operations_v1.AbstractOperationsClient: - """Create the client designed to process long-running operations.
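The constructor above accepts an optional AutoMlRestInterceptor and falls back to the no-op default when none is given. A minimal sketch of plugging in a custom interceptor (illustrative only, not part of the generated sources; it assumes Application Default Credentials are available, uses the standard GAPIC `transport=` argument on the public client, and the project/location values are placeholders):

from google.cloud.automl_v1 import AutoMlClient
from google.cloud.automl_v1.services.auto_ml.transports.rest import (
    AutoMlRestInterceptor,
    AutoMlRestTransport,
)


class LoggingInterceptor(AutoMlRestInterceptor):
    """Logs ListModels traffic by overriding the pre/post hooks defined above."""

    def pre_list_models(self, request, metadata):
        print(f"ListModels request for parent={request.parent!r}")
        return request, metadata

    def post_list_models(self, response):
        print(f"ListModels returned {len(response.model)} model(s)")
        return response


# Credentials are resolved from the environment (Application Default Credentials).
transport = AutoMlRestTransport(interceptor=LoggingInterceptor())
client = AutoMlClient(transport=transport)
for m in client.list_models(parent="projects/my-project/locations/us-central1"):
    print(m.name)

The same pattern applies to any of the pre_*/post_* hooks on the interceptor.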
- - This property caches on the instance; repeated calls return the same - client. - """ - # Only create a new client if we do not already have one. - if self._operations_client is None: - http_options: Dict[str, List[Dict[str, str]]] = { - 'google.longrunning.Operations.CancelOperation': [ - { - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/operations/*}:cancel', - 'body': '*', - }, - ], - 'google.longrunning.Operations.DeleteOperation': [ - { - 'method': 'delete', - 'uri': '/v1/{name=projects/*/locations/*/operations/*}', - }, - ], - 'google.longrunning.Operations.GetOperation': [ - { - 'method': 'get', - 'uri': '/v1/{name=projects/*/locations/*/operations/*}', - }, - ], - 'google.longrunning.Operations.ListOperations': [ - { - 'method': 'get', - 'uri': '/v1/{name=projects/*/locations/*}/operations', - }, - ], - 'google.longrunning.Operations.WaitOperation': [ - { - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/operations/*}:wait', - 'body': '*', - }, - ], - } - - rest_transport = operations_v1.OperationsRestTransport( - host=self._host, - # use the credentials which are saved - credentials=self._credentials, - scopes=self._scopes, - http_options=http_options, - path_prefix="v1") - - self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) - - # Return the client from cache. - return self._operations_client - - class _CreateDataset(AutoMlRestStub): - def __hash__(self): - return hash("CreateDataset") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.CreateDatasetRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the create dataset method over HTTP. - - Args: - request (~.service.CreateDatasetRequest): - The request object. Request message for - [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
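The http_options table above is what lets the transport poll long-running operations over REST after a mutating stub returns a raw operations_pb2.Operation. A rough sketch of that flow, driving the transport directly rather than through the public client (illustrative only; it assumes Application Default Credentials, and the project and dataset values are placeholders):

from google.cloud.automl_v1.services.auto_ml.transports.rest import AutoMlRestTransport
from google.cloud.automl_v1.types import service

transport = AutoMlRestTransport()  # credentials come from the environment
operation = transport.create_dataset(service.CreateDatasetRequest(
    parent="projects/my-project/locations/us-central1",
    dataset={
        "display_name": "my_dataset",
        "translation_dataset_metadata": {
            "source_language_code": "en",
            "target_language_code": "es",
        },
    },
))
# GetOperation is routed through the '/v1/{name=projects/*/locations/*/operations/*}' mapping above.
latest = transport.operations_client.get_operation(name=operation.name)
print(latest.done)

In normal use the public AutoMlClient wraps this polling in an operation future; the sketch only shows the transport-level plumbing.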
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1/{parent=projects/*/locations/*}/datasets', - 'body': 'dataset', - }, - ] - request, metadata = self._interceptor.pre_create_dataset(request, metadata) - pb_request = service.CreateDatasetRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_dataset(resp) - return resp - - class _CreateModel(AutoMlRestStub): - def __hash__(self): - return hash("CreateModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.CreateModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the create model method over HTTP. - - Args: - request (~.service.CreateModelRequest): - The request object. Request message for - [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1/{parent=projects/*/locations/*}/models', - 'body': 'model', - }, - ] - request, metadata = self._interceptor.pre_create_model(request, metadata) - pb_request = service.CreateModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_model(resp) - return resp - - class _DeleteDataset(AutoMlRestStub): - def __hash__(self): - return hash("DeleteDataset") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.DeleteDatasetRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the delete dataset method over HTTP. - - Args: - request (~.service.DeleteDatasetRequest): - The request object. Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'delete', - 'uri': '/v1/{name=projects/*/locations/*/datasets/*}', - }, - ] - request, metadata = self._interceptor.pre_delete_dataset(request, metadata) - pb_request = service.DeleteDatasetRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_delete_dataset(resp) - return resp - - class _DeleteModel(AutoMlRestStub): - def __hash__(self): - return hash("DeleteModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.DeleteModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the delete model method over HTTP. - - Args: - request (~.service.DeleteModelRequest): - The request object. Request message for - [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'delete', - 'uri': '/v1/{name=projects/*/locations/*/models/*}', - }, - ] - request, metadata = self._interceptor.pre_delete_model(request, metadata) - pb_request = service.DeleteModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_delete_model(resp) - return resp - - class _DeployModel(AutoMlRestStub): - def __hash__(self): - return hash("DeployModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.DeployModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the deploy model method over HTTP. - - Args: - request (~.service.DeployModelRequest): - The request object. Request message for - [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/models/*}:deploy', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_deploy_model(request, metadata) - pb_request = service.DeployModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_deploy_model(resp) - return resp - - class _ExportData(AutoMlRestStub): - def __hash__(self): - return hash("ExportData") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ExportDataRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the export data method over HTTP. - - Args: - request (~.service.ExportDataRequest): - The request object. Request message for - [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/datasets/*}:exportData', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_export_data(request, metadata) - pb_request = service.ExportDataRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_export_data(resp) - return resp - - class _ExportModel(AutoMlRestStub): - def __hash__(self): - return hash("ExportModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ExportModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the export model method over HTTP. - - Args: - request (~.service.ExportModelRequest): - The request object. Request message for - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an - error code will be returned. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/models/*}:export', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_export_model(request, metadata) - pb_request = service.ExportModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_export_model(resp) - return resp - - class _GetAnnotationSpec(AutoMlRestStub): - def __hash__(self): - return hash("GetAnnotationSpec") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.GetAnnotationSpecRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> annotation_spec.AnnotationSpec: - r"""Call the get annotation spec method over HTTP. - - Args: - request (~.service.GetAnnotationSpecRequest): - The request object. Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.annotation_spec.AnnotationSpec: - A definition of an annotation spec. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}', - }, - ] - request, metadata = self._interceptor.pre_get_annotation_spec(request, metadata) - pb_request = service.GetAnnotationSpecRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = annotation_spec.AnnotationSpec() - pb_resp = annotation_spec.AnnotationSpec.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_annotation_spec(resp) - return resp - - class _GetDataset(AutoMlRestStub): - def __hash__(self): - return hash("GetDataset") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.GetDatasetRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> dataset.Dataset: - r"""Call the get dataset method over HTTP. - - Args: - request (~.service.GetDatasetRequest): - The request object. Request message for - [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.dataset.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1/{name=projects/*/locations/*/datasets/*}', - }, - ] - request, metadata = self._interceptor.pre_get_dataset(request, metadata) - pb_request = service.GetDatasetRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = dataset.Dataset() - pb_resp = dataset.Dataset.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_dataset(resp) - return resp - - class _GetModel(AutoMlRestStub): - def __hash__(self): - return hash("GetModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.GetModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> model.Model: - r"""Call the get model method over HTTP. - - Args: - request (~.service.GetModelRequest): - The request object. Request message for - [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.model.Model: - API proto representing a trained - machine learning model. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1/{name=projects/*/locations/*/models/*}', - }, - ] - request, metadata = self._interceptor.pre_get_model(request, metadata) - pb_request = service.GetModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = model.Model() - pb_resp = model.Model.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_model(resp) - return resp - - class _GetModelEvaluation(AutoMlRestStub): - def __hash__(self): - return hash("GetModelEvaluation") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.GetModelEvaluationRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> model_evaluation.ModelEvaluation: - r"""Call the get model evaluation method over HTTP. - - Args: - request (~.service.GetModelEvaluationRequest): - The request object. Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.model_evaluation.ModelEvaluation: - Evaluation results of a model. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}', - }, - ] - request, metadata = self._interceptor.pre_get_model_evaluation(request, metadata) - pb_request = service.GetModelEvaluationRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = model_evaluation.ModelEvaluation() - pb_resp = model_evaluation.ModelEvaluation.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_model_evaluation(resp) - return resp - - class _ImportData(AutoMlRestStub): - def __hash__(self): - return hash("ImportData") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ImportDataRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the import data method over HTTP. - - Args: - request (~.service.ImportDataRequest): - The request object. Request message for - [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/datasets/*}:importData', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_import_data(request, metadata) - pb_request = service.ImportDataRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_import_data(resp) - return resp - - class _ListDatasets(AutoMlRestStub): - def __hash__(self): - return hash("ListDatasets") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ListDatasetsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> service.ListDatasetsResponse: - r"""Call the list datasets method over HTTP. - - Args: - request (~.service.ListDatasetsRequest): - The request object. Request message for - [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.service.ListDatasetsResponse: - Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1/{parent=projects/*/locations/*}/datasets', - }, - ] - request, metadata = self._interceptor.pre_list_datasets(request, metadata) - pb_request = service.ListDatasetsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = service.ListDatasetsResponse() - pb_resp = service.ListDatasetsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_datasets(resp) - return resp - - class _ListModelEvaluations(AutoMlRestStub): - def __hash__(self): - return hash("ListModelEvaluations") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "filter" : "", } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ListModelEvaluationsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> service.ListModelEvaluationsResponse: - r"""Call the list model evaluations method over HTTP. - - Args: - request (~.service.ListModelEvaluationsRequest): - The request object. Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.service.ListModelEvaluationsResponse: - Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations', - }, - ] - request, metadata = self._interceptor.pre_list_model_evaluations(request, metadata) - pb_request = service.ListModelEvaluationsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = service.ListModelEvaluationsResponse() - pb_resp = service.ListModelEvaluationsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_model_evaluations(resp) - return resp - - class _ListModels(AutoMlRestStub): - def __hash__(self): - return hash("ListModels") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ListModelsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> service.ListModelsResponse: - r"""Call the list models method over HTTP. - - Args: - request (~.service.ListModelsRequest): - The request object. Request message for - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.service.ListModelsResponse: - Response message for - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1/{parent=projects/*/locations/*}/models', - }, - ] - request, metadata = self._interceptor.pre_list_models(request, metadata) - pb_request = service.ListModelsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = service.ListModelsResponse() - pb_resp = service.ListModelsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_models(resp) - return resp - - class _UndeployModel(AutoMlRestStub): - def __hash__(self): - return hash("UndeployModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.UndeployModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the undeploy model method over HTTP. - - Args: - request (~.service.UndeployModelRequest): - The request object. Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/models/*}:undeploy', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_undeploy_model(request, metadata) - pb_request = service.UndeployModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_undeploy_model(resp) - return resp - - class _UpdateDataset(AutoMlRestStub): - def __hash__(self): - return hash("UpdateDataset") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask" : {}, } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.UpdateDatasetRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> gca_dataset.Dataset: - r"""Call the update dataset method over HTTP. - - Args: - request (~.service.UpdateDatasetRequest): - The request object. Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.gca_dataset.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'patch', - 'uri': '/v1/{dataset.name=projects/*/locations/*/datasets/*}', - 'body': 'dataset', - }, - ] - request, metadata = self._interceptor.pre_update_dataset(request, metadata) - pb_request = service.UpdateDatasetRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = gca_dataset.Dataset() - pb_resp = gca_dataset.Dataset.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_dataset(resp) - return resp - - class _UpdateModel(AutoMlRestStub): - def __hash__(self): - return hash("UpdateModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask" : {}, } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.UpdateModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> gca_model.Model: - r"""Call the update model method over HTTP. - - Args: - request (~.service.UpdateModelRequest): - The request object. Request message for - [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.gca_model.Model: - API proto representing a trained - machine learning model. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'patch', - 'uri': '/v1/{model.name=projects/*/locations/*/models/*}', - 'body': 'model', - }, - ] - request, metadata = self._interceptor.pre_update_model(request, metadata) - pb_request = service.UpdateModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = gca_model.Model() - pb_resp = gca_model.Model.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_model(resp) - return resp - - @property - def create_dataset(self) -> Callable[ - [service.CreateDatasetRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateDataset(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_model(self) -> Callable[ - [service.CreateModelRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_dataset(self) -> Callable[ - [service.DeleteDatasetRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteDataset(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_model(self) -> Callable[ - [service.DeleteModelRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def deploy_model(self) -> Callable[ - [service.DeployModelRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._DeployModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def export_data(self) -> Callable[ - [service.ExportDataRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ExportData(self._session, self._host, self._interceptor) # type: ignore - - @property - def export_model(self) -> Callable[ - [service.ExportModelRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ExportModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_annotation_spec(self) -> Callable[ - [service.GetAnnotationSpecRequest], - annotation_spec.AnnotationSpec]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetAnnotationSpec(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_dataset(self) -> Callable[ - [service.GetDatasetRequest], - dataset.Dataset]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetDataset(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_model(self) -> Callable[ - [service.GetModelRequest], - model.Model]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_model_evaluation(self) -> Callable[ - [service.GetModelEvaluationRequest], - model_evaluation.ModelEvaluation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetModelEvaluation(self._session, self._host, self._interceptor) # type: ignore - - @property - def import_data(self) -> Callable[ - [service.ImportDataRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ImportData(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_datasets(self) -> Callable[ - [service.ListDatasetsRequest], - service.ListDatasetsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListDatasets(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_model_evaluations(self) -> Callable[ - [service.ListModelEvaluationsRequest], - service.ListModelEvaluationsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListModelEvaluations(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_models(self) -> Callable[ - [service.ListModelsRequest], - service.ListModelsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._ListModels(self._session, self._host, self._interceptor) # type: ignore - - @property - def undeploy_model(self) -> Callable[ - [service.UndeployModelRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UndeployModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_dataset(self) -> Callable[ - [service.UpdateDatasetRequest], - gca_dataset.Dataset]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateDataset(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_model(self) -> Callable[ - [service.UpdateModelRequest], - gca_model.Model]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def kind(self) -> str: - return "rest" - - def close(self): - self._session.close() - - -__all__=( - 'AutoMlRestTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/__init__.py deleted file mode 100644 index 905b8c43..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import PredictionServiceClient -from .async_client import PredictionServiceAsyncClient - -__all__ = ( - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/async_client.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/async_client.py deleted file mode 100644 index 86342cd0..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/async_client.py +++ /dev/null @@ -1,656 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union - -from google.cloud.automl_v1 import gapic_version as package_version - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.automl_v1.types import annotation_payload -from google.cloud.automl_v1.types import data_items -from google.cloud.automl_v1.types import io -from google.cloud.automl_v1.types import operations -from google.cloud.automl_v1.types import prediction_service -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport -from .client import PredictionServiceClient - - -class PredictionServiceAsyncClient: - """AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or dash-case, either of those cases is accepted. - """ - - _client: PredictionServiceClient - - DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT - - model_path = staticmethod(PredictionServiceClient.model_path) - parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) - common_billing_account_path = staticmethod(PredictionServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) - common_project_path = staticmethod(PredictionServiceClient.common_project_path) - parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) - common_location_path = staticmethod(PredictionServiceClient.common_location_path) - parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. - """ - return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. 
- - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. - """ - return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - return PredictionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore - - @property - def transport(self) -> PredictionServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PredictionServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", - client_options: Optional[ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = PredictionServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def predict(self, - request: Optional[Union[prediction_service.PredictRequest, dict]] = None, - *, - name: Optional[str] = None, - payload: Optional[data_items.ExamplePayload] = None, - params: Optional[MutableMapping[str, str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. The prediction result is directly - returned in the response. Available for following ML scenarios, - and their expected request payloads: - - AutoML Vision Classification - - - An image in .JPEG, .GIF or .PNG format, image_bytes up to - 30MB. - - AutoML Vision Object Detection - - - An image in .JPEG, .GIF or .PNG format, image_bytes up to - 30MB. - - AutoML Natural Language Classification - - - A TextSnippet up to 60,000 characters, UTF-8 encoded or a - document in .PDF, .TIF or .TIFF format with size upto 2MB. - - AutoML Natural Language Entity Extraction - - - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a - document in .PDF, .TIF or .TIFF format with size upto 20MB. - - AutoML Natural Language Sentiment Analysis - - - A TextSnippet up to 60,000 characters, UTF-8 encoded or a - document in .PDF, .TIF or .TIFF format with size upto 2MB. - - AutoML Translation - - - A TextSnippet up to 25,000 characters, UTF-8 encoded. - - AutoML Tables - - - A row with column values matching the columns of the model, - up to 5MB. Not available for FORECASTING ``prediction_type``. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_predict(): - # Create a client - client = automl_v1.PredictionServiceAsyncClient() - - # Initialize request argument(s) - payload = automl_v1.ExamplePayload() - payload.image.image_bytes = b'image_bytes_blob' - - request = automl_v1.PredictRequest( - name="name_value", - payload=payload, - ) - - # Make the request - response = await client.predict(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.PredictRequest, dict]]): - The request object. Request message for - [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. - name (:class:`str`): - Required. Name of the model requested - to serve the prediction. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - payload (:class:`google.cloud.automl_v1.types.ExamplePayload`): - Required. Payload to perform a - prediction on. The payload must match - the problem type that the model was - trained to solve. - - This corresponds to the ``payload`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - params (:class:`MutableMapping[str, str]`): - Additional domain-specific parameters, any string must - be up to 25000 characters long. - - AutoML Vision Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. - When the model makes predictions for an image, it will - only produce results that have at least this confidence - score. The default is 0.5. - - AutoML Vision Object Detection - - ``score_threshold`` : (float) When Model detects objects - on the image, it will only produce bounding boxes which - have at least this confidence score. Value in 0 to 1 - range, default is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number - of bounding boxes returned. The default is 100. The - number of returned bounding boxes might be limited by - the server. - - AutoML Tables - - ``feature_importance`` : (boolean) Whether - [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance] - is populated in the returned list of - [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation] - objects. The default is false. - - This corresponds to the ``params`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, payload, params]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.PredictRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if payload is not None: - request.payload = payload - - if params: - request.params.update(params) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.predict, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def batch_predict(self, - request: Optional[Union[prediction_service.BatchPredictRequest, dict]] = None, - *, - name: Optional[str] = None, - input_config: Optional[io.BatchPredictInputConfig] = None, - output_config: Optional[io.BatchPredictOutputConfig] = None, - params: Optional[MutableMapping[str, str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Perform a batch prediction. Unlike the online - [Predict][google.cloud.automl.v1.PredictionService.Predict], - batch prediction result won't be immediately available in the - response. Instead, a long running operation object is returned. - User can poll the operation result via - [GetOperation][google.longrunning.Operations.GetOperation] - method. Once the operation is done, - [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] - is returned in the - [response][google.longrunning.Operation.response] field. - Available for following ML scenarios: - - - AutoML Vision Classification - - AutoML Vision Object Detection - - AutoML Video Intelligence Classification - - AutoML Video Intelligence Object Tracking \* AutoML Natural - Language Classification - - AutoML Natural Language Entity Extraction - - AutoML Natural Language Sentiment Analysis - - AutoML Tables - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - async def sample_batch_predict(): - # Create a client - client = automl_v1.PredictionServiceAsyncClient() - - # Initialize request argument(s) - input_config = automl_v1.BatchPredictInputConfig() - input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] - - output_config = automl_v1.BatchPredictOutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.BatchPredictRequest( - name="name_value", - input_config=input_config, - output_config=output_config, - ) - - # Make the request - operation = client.batch_predict(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1.types.BatchPredictRequest, dict]]): - The request object. Request message for - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. - name (:class:`str`): - Required. Name of the model requested - to serve the batch prediction. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - input_config (:class:`google.cloud.automl_v1.types.BatchPredictInputConfig`): - Required. The input configuration for - batch prediction. - - This corresponds to the ``input_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (:class:`google.cloud.automl_v1.types.BatchPredictOutputConfig`): - Required. The Configuration - specifying where output predictions - should be written. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - params (:class:`MutableMapping[str, str]`): - Additional domain-specific parameters for the - predictions, any string must be up to 25000 characters - long. - - AutoML Natural Language Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. - When the model makes predictions for a text snippet, it - will only produce results that have at least this - confidence score. The default is 0.5. - - AutoML Vision Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. - When the model makes predictions for an image, it will - only produce results that have at least this confidence - score. The default is 0.5. - - AutoML Vision Object Detection - - ``score_threshold`` : (float) When Model detects objects - on the image, it will only produce bounding boxes which - have at least this confidence score. Value in 0 to 1 - range, default is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number - of bounding boxes returned per image. The default is - 100, the number of bounding boxes returned might be - limited by the server. AutoML Video Intelligence - Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. - When the model makes predictions for a video, it will - only produce results that have at least this confidence - score. The default is 0.5. - - ``segment_classification`` : (boolean) Set to true to - request segment-level classification. 
AutoML Video - Intelligence returns labels and their confidence scores - for the entire segment of the video that user specified - in the request configuration. The default is true. - - ``shot_classification`` : (boolean) Set to true to - request shot-level classification. AutoML Video - Intelligence determines the boundaries for each camera - shot in the entire segment of the video that user - specified in the request configuration. AutoML Video - Intelligence then returns labels and their confidence - scores for each detected shot, along with the start and - end time of the shot. The default is false. - - WARNING: Model evaluation is not done for this - classification type, the quality of it depends on - training data, but there are no metrics provided to - describe that quality. - - ``1s_interval_classification`` : (boolean) Set to true - to request classification for a video at one-second - intervals. AutoML Video Intelligence returns labels and - their confidence scores for each second of the entire - segment of the video that user specified in the request - configuration. The default is false. - - WARNING: Model evaluation is not done for this - classification type, the quality of it depends on - training data, but there are no metrics provided to - describe that quality. - - AutoML Video Intelligence Object Tracking - - ``score_threshold`` : (float) When Model detects objects - on video frames, it will only produce bounding boxes - which have at least this confidence score. Value in 0 to - 1 range, default is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number - of bounding boxes returned per image. The default is - 100, the number of bounding boxes returned might be - limited by the server. - - ``min_bounding_box_size`` : (float) Only bounding boxes - with shortest edge at least that long as a relative - value of video frame size are returned. Value in 0 to 1 - range. Default is 0. - - This corresponds to the ``params`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.automl_v1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in - [response][google.longrunning.Operation.response] of - the operation returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, input_config, output_config, params]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.BatchPredictRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - if input_config is not None: - request.input_config = input_config - if output_config is not None: - request.output_config = output_config - - if params: - request.params.update(params) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_predict, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - prediction_service.BatchPredictResult, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self) -> "PredictionServiceAsyncClient": - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "PredictionServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/client.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/client.py deleted file mode 100644 index 96c0c3bf..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/client.py +++ /dev/null @@ -1,858 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast - -from google.cloud.automl_v1 import gapic_version as package_version - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.automl_v1.types import annotation_payload -from google.cloud.automl_v1.types import data_items -from google.cloud.automl_v1.types import io -from google.cloud.automl_v1.types import operations -from google.cloud.automl_v1.types import prediction_service -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import PredictionServiceGrpcTransport -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport -from .transports.rest import PredictionServiceRestTransport - - -class PredictionServiceClientMeta(type): - """Metaclass for the PredictionService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] - _transport_registry["grpc"] = PredictionServiceGrpcTransport - _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport - _transport_registry["rest"] = PredictionServiceRestTransport - - def get_transport_class(cls, - label: Optional[str] = None, - ) -> Type[PredictionServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class PredictionServiceClient(metaclass=PredictionServiceClientMeta): - """AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or dash-case, either of those cases is accepted. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "automl.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PredictionServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PredictionServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. 
- (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - if client_options is None: - client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") - - # Figure out the client cert source to use. - client_cert_source = None - if use_client_cert == "true": - if client_options.client_cert_source: - client_cert_source = client_options.client_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): - api_endpoint = cls.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = cls.DEFAULT_ENDPOINT - - return api_endpoint, client_cert_source - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, PredictionServiceTransport]] = None, - client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) - - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) - - api_key_value = getattr(client_options, "api_key", None) - if api_key_value and credentials: - raise ValueError("client_options.api_key and credentials are mutually exclusive") - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, PredictionServiceTransport): - # transport is a PredictionServiceTransport instance. - if credentials or client_options.credentials_file or api_key_value: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - import google.auth._default # type: ignore - - if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): - credentials = google.auth._default.get_api_key_credentials(api_key_value) - - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - api_audience=client_options.api_audience, - ) - - def predict(self, - request: Optional[Union[prediction_service.PredictRequest, dict]] = None, - *, - name: Optional[str] = None, - payload: Optional[data_items.ExamplePayload] = None, - params: Optional[MutableMapping[str, str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. The prediction result is directly - returned in the response. Available for following ML scenarios, - and their expected request payloads: - - AutoML Vision Classification - - - An image in .JPEG, .GIF or .PNG format, image_bytes up to - 30MB. - - AutoML Vision Object Detection - - - An image in .JPEG, .GIF or .PNG format, image_bytes up to - 30MB. 
- - AutoML Natural Language Classification - - - A TextSnippet up to 60,000 characters, UTF-8 encoded or a - document in .PDF, .TIF or .TIFF format with size upto 2MB. - - AutoML Natural Language Entity Extraction - - - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a - document in .PDF, .TIF or .TIFF format with size upto 20MB. - - AutoML Natural Language Sentiment Analysis - - - A TextSnippet up to 60,000 characters, UTF-8 encoded or a - document in .PDF, .TIF or .TIFF format with size upto 2MB. - - AutoML Translation - - - A TextSnippet up to 25,000 characters, UTF-8 encoded. - - AutoML Tables - - - A row with column values matching the columns of the model, - up to 5MB. Not available for FORECASTING ``prediction_type``. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_predict(): - # Create a client - client = automl_v1.PredictionServiceClient() - - # Initialize request argument(s) - payload = automl_v1.ExamplePayload() - payload.image.image_bytes = b'image_bytes_blob' - - request = automl_v1.PredictRequest( - name="name_value", - payload=payload, - ) - - # Make the request - response = client.predict(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.PredictRequest, dict]): - The request object. Request message for - [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. - name (str): - Required. Name of the model requested - to serve the prediction. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - payload (google.cloud.automl_v1.types.ExamplePayload): - Required. Payload to perform a - prediction on. The payload must match - the problem type that the model was - trained to solve. - - This corresponds to the ``payload`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - params (MutableMapping[str, str]): - Additional domain-specific parameters, any string must - be up to 25000 characters long. - - AutoML Vision Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. - When the model makes predictions for an image, it will - only produce results that have at least this confidence - score. The default is 0.5. - - AutoML Vision Object Detection - - ``score_threshold`` : (float) When Model detects objects - on the image, it will only produce bounding boxes which - have at least this confidence score. Value in 0 to 1 - range, default is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number - of bounding boxes returned. The default is 100. The - number of returned bounding boxes might be limited by - the server. - - AutoML Tables - - ``feature_importance`` : (boolean) Whether - [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance] - is populated in the returned list of - [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation] - objects. The default is false. - - This corresponds to the ``params`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, payload, params]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.PredictRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.PredictRequest): - request = prediction_service.PredictRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if payload is not None: - request.payload = payload - if params is not None: - request.params = params - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.predict] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def batch_predict(self, - request: Optional[Union[prediction_service.BatchPredictRequest, dict]] = None, - *, - name: Optional[str] = None, - input_config: Optional[io.BatchPredictInputConfig] = None, - output_config: Optional[io.BatchPredictOutputConfig] = None, - params: Optional[MutableMapping[str, str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Perform a batch prediction. Unlike the online - [Predict][google.cloud.automl.v1.PredictionService.Predict], - batch prediction result won't be immediately available in the - response. Instead, a long running operation object is returned. - User can poll the operation result via - [GetOperation][google.longrunning.Operations.GetOperation] - method. Once the operation is done, - [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] - is returned in the - [response][google.longrunning.Operation.response] field. - Available for following ML scenarios: - - - AutoML Vision Classification - - AutoML Vision Object Detection - - AutoML Video Intelligence Classification - - AutoML Video Intelligence Object Tracking \* AutoML Natural - Language Classification - - AutoML Natural Language Entity Extraction - - AutoML Natural Language Sentiment Analysis - - AutoML Tables - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. 
- # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1 - - def sample_batch_predict(): - # Create a client - client = automl_v1.PredictionServiceClient() - - # Initialize request argument(s) - input_config = automl_v1.BatchPredictInputConfig() - input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] - - output_config = automl_v1.BatchPredictOutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.BatchPredictRequest( - name="name_value", - input_config=input_config, - output_config=output_config, - ) - - # Make the request - operation = client.batch_predict(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1.types.BatchPredictRequest, dict]): - The request object. Request message for - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. - name (str): - Required. Name of the model requested - to serve the batch prediction. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - input_config (google.cloud.automl_v1.types.BatchPredictInputConfig): - Required. The input configuration for - batch prediction. - - This corresponds to the ``input_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.automl_v1.types.BatchPredictOutputConfig): - Required. The Configuration - specifying where output predictions - should be written. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - params (MutableMapping[str, str]): - Additional domain-specific parameters for the - predictions, any string must be up to 25000 characters - long. - - AutoML Natural Language Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. - When the model makes predictions for a text snippet, it - will only produce results that have at least this - confidence score. The default is 0.5. - - AutoML Vision Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. - When the model makes predictions for an image, it will - only produce results that have at least this confidence - score. The default is 0.5. - - AutoML Vision Object Detection - - ``score_threshold`` : (float) When Model detects objects - on the image, it will only produce bounding boxes which - have at least this confidence score. Value in 0 to 1 - range, default is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number - of bounding boxes returned per image. The default is - 100, the number of bounding boxes returned might be - limited by the server. AutoML Video Intelligence - Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. - When the model makes predictions for a video, it will - only produce results that have at least this confidence - score. The default is 0.5. - - ``segment_classification`` : (boolean) Set to true to - request segment-level classification. 
AutoML Video - Intelligence returns labels and their confidence scores - for the entire segment of the video that user specified - in the request configuration. The default is true. - - ``shot_classification`` : (boolean) Set to true to - request shot-level classification. AutoML Video - Intelligence determines the boundaries for each camera - shot in the entire segment of the video that user - specified in the request configuration. AutoML Video - Intelligence then returns labels and their confidence - scores for each detected shot, along with the start and - end time of the shot. The default is false. - - WARNING: Model evaluation is not done for this - classification type, the quality of it depends on - training data, but there are no metrics provided to - describe that quality. - - ``1s_interval_classification`` : (boolean) Set to true - to request classification for a video at one-second - intervals. AutoML Video Intelligence returns labels and - their confidence scores for each second of the entire - segment of the video that user specified in the request - configuration. The default is false. - - WARNING: Model evaluation is not done for this - classification type, the quality of it depends on - training data, but there are no metrics provided to - describe that quality. - - AutoML Video Intelligence Object Tracking - - ``score_threshold`` : (float) When Model detects objects - on video frames, it will only produce bounding boxes - which have at least this confidence score. Value in 0 to - 1 range, default is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number - of bounding boxes returned per image. The default is - 100, the number of bounding boxes returned might be - limited by the server. - - ``min_bounding_box_size`` : (float) Only bounding boxes - with shortest edge at least that long as a relative - value of video frame size are returned. Value in 0 to 1 - range. Default is 0. - - This corresponds to the ``params`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.automl_v1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in - [response][google.longrunning.Operation.response] of - the operation returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, input_config, output_config, params]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.BatchPredictRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, prediction_service.BatchPredictRequest): - request = prediction_service.BatchPredictRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if input_config is not None: - request.input_config = input_config - if output_config is not None: - request.output_config = output_config - if params is not None: - request.params = params - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_predict] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - prediction_service.BatchPredictResult, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self) -> "PredictionServiceClient": - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - - - - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "PredictionServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/__init__.py deleted file mode 100644 index d8c81688..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import PredictionServiceTransport -from .grpc import PredictionServiceGrpcTransport -from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport -from .rest import PredictionServiceRestTransport -from .rest import PredictionServiceRestInterceptor - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] -_transport_registry['grpc'] = PredictionServiceGrpcTransport -_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport -_transport_registry['rest'] = PredictionServiceRestTransport - -__all__ = ( - 'PredictionServiceTransport', - 'PredictionServiceGrpcTransport', - 'PredictionServiceGrpcAsyncIOTransport', - 'PredictionServiceRestTransport', - 'PredictionServiceRestInterceptor', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/base.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/base.py deleted file mode 100644 index 609956a0..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/base.py +++ /dev/null @@ -1,169 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union - -from google.cloud.automl_v1 import gapic_version as package_version - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.automl_v1.types import prediction_service -from google.longrunning import operations_pb2 # type: ignore - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -class PredictionServiceTransport(abc.ABC): - """Abstract transport class for PredictionService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'automl.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - # Don't apply audience if the credentials file passed from user. - if hasattr(credentials, "with_gdch_audience"): - credentials = credentials.with_gdch_audience(api_audience if api_audience else host) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.predict: gapic_v1.method.wrap_method( - self.predict, - default_timeout=60.0, - client_info=client_info, - ), - self.batch_predict: gapic_v1.method.wrap_method( - self.batch_predict, - default_timeout=60.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - Union[ - prediction_service.PredictResponse, - Awaitable[prediction_service.PredictResponse] - ]]: - raise NotImplementedError() - - @property - def batch_predict(self) -> Callable[ - [prediction_service.BatchPredictRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def kind(self) -> str: - raise NotImplementedError() - - -__all__ = ( - 'PredictionServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc.py deleted file mode 100644 index 982858db..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc.py +++ /dev/null @@ -1,367 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.automl_v1.types import prediction_service -from google.longrunning import operations_pb2 # type: ignore -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO - - -class PredictionServiceGrpcTransport(PredictionServiceTransport): - """gRPC backend transport for PredictionService. - - AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or dash-case, either of those cases is accepted. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. 
- - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - prediction_service.PredictResponse]: - r"""Return a callable for the predict method over gRPC. - - Perform an online prediction. The prediction result is directly - returned in the response. Available for following ML scenarios, - and their expected request payloads: - - AutoML Vision Classification - - - An image in .JPEG, .GIF or .PNG format, image_bytes up to - 30MB. - - AutoML Vision Object Detection - - - An image in .JPEG, .GIF or .PNG format, image_bytes up to - 30MB. - - AutoML Natural Language Classification - - - A TextSnippet up to 60,000 characters, UTF-8 encoded or a - document in .PDF, .TIF or .TIFF format with size upto 2MB. - - AutoML Natural Language Entity Extraction - - - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a - document in .PDF, .TIF or .TIFF format with size upto 20MB. - - AutoML Natural Language Sentiment Analysis - - - A TextSnippet up to 60,000 characters, UTF-8 encoded or a - document in .PDF, .TIF or .TIFF format with size upto 2MB. - - AutoML Translation - - - A TextSnippet up to 25,000 characters, UTF-8 encoded. - - AutoML Tables - - - A row with column values matching the columns of the model, - up to 5MB. Not available for FORECASTING ``prediction_type``. - - Returns: - Callable[[~.PredictRequest], - ~.PredictResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - @property - def batch_predict(self) -> Callable[ - [prediction_service.BatchPredictRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch predict method over gRPC. - - Perform a batch prediction. Unlike the online - [Predict][google.cloud.automl.v1.PredictionService.Predict], - batch prediction result won't be immediately available in the - response. Instead, a long running operation object is returned. 
- User can poll the operation result via - [GetOperation][google.longrunning.Operations.GetOperation] - method. Once the operation is done, - [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] - is returned in the - [response][google.longrunning.Operation.response] field. - Available for following ML scenarios: - - - AutoML Vision Classification - - AutoML Vision Object Detection - - AutoML Video Intelligence Classification - - AutoML Video Intelligence Object Tracking \* AutoML Natural - Language Classification - - AutoML Natural Language Entity Extraction - - AutoML Natural Language Sentiment Analysis - - AutoML Tables - - Returns: - Callable[[~.BatchPredictRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_predict' not in self._stubs: - self._stubs['batch_predict'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.PredictionService/BatchPredict', - request_serializer=prediction_service.BatchPredictRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_predict'] - - def close(self): - self.grpc_channel.close() - - @property - def kind(self) -> str: - return "grpc" - - -__all__ = ( - 'PredictionServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py deleted file mode 100644 index 2bb725ec..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,366 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.automl_v1.types import prediction_service -from google.longrunning import operations_pb2 # type: ignore -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import PredictionServiceGrpcTransport - - -class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): - """gRPC AsyncIO backend transport for PredictionService. - - AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or dash-case, either of those cases is accepted. 
- - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. 
- api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. 
- credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - Awaitable[prediction_service.PredictResponse]]: - r"""Return a callable for the predict method over gRPC. - - Perform an online prediction. The prediction result is directly - returned in the response. Available for following ML scenarios, - and their expected request payloads: - - AutoML Vision Classification - - - An image in .JPEG, .GIF or .PNG format, image_bytes up to - 30MB. - - AutoML Vision Object Detection - - - An image in .JPEG, .GIF or .PNG format, image_bytes up to - 30MB. - - AutoML Natural Language Classification - - - A TextSnippet up to 60,000 characters, UTF-8 encoded or a - document in .PDF, .TIF or .TIFF format with size upto 2MB. - - AutoML Natural Language Entity Extraction - - - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a - document in .PDF, .TIF or .TIFF format with size upto 20MB. - - AutoML Natural Language Sentiment Analysis - - - A TextSnippet up to 60,000 characters, UTF-8 encoded or a - document in .PDF, .TIF or .TIFF format with size upto 2MB. - - AutoML Translation - - - A TextSnippet up to 25,000 characters, UTF-8 encoded. - - AutoML Tables - - - A row with column values matching the columns of the model, - up to 5MB. Not available for FORECASTING ``prediction_type``. - - Returns: - Callable[[~.PredictRequest], - Awaitable[~.PredictResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - @property - def batch_predict(self) -> Callable[ - [prediction_service.BatchPredictRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch predict method over gRPC. - - Perform a batch prediction. Unlike the online - [Predict][google.cloud.automl.v1.PredictionService.Predict], - batch prediction result won't be immediately available in the - response. 
Instead, a long running operation object is returned. - User can poll the operation result via - [GetOperation][google.longrunning.Operations.GetOperation] - method. Once the operation is done, - [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] - is returned in the - [response][google.longrunning.Operation.response] field. - Available for following ML scenarios: - - - AutoML Vision Classification - - AutoML Vision Object Detection - - AutoML Video Intelligence Classification - - AutoML Video Intelligence Object Tracking \* AutoML Natural - Language Classification - - AutoML Natural Language Entity Extraction - - AutoML Natural Language Sentiment Analysis - - AutoML Tables - - Returns: - Callable[[~.BatchPredictRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_predict' not in self._stubs: - self._stubs['batch_predict'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1.PredictionService/BatchPredict', - request_serializer=prediction_service.BatchPredictRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_predict'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'PredictionServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/rest.py b/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/rest.py deleted file mode 100644 index 86096e21..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/services/prediction_service/transports/rest.py +++ /dev/null @@ -1,484 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.api_core import exceptions as core_exceptions -from google.api_core import retry as retries -from google.api_core import rest_helpers -from google.api_core import rest_streaming -from google.api_core import path_template -from google.api_core import gapic_v1 - -from google.protobuf import json_format -from google.api_core import operations_v1 -from requests import __version__ as requests_version -import dataclasses -import re -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -import warnings - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - - -from google.cloud.automl_v1.types import prediction_service -from google.longrunning import operations_pb2 # type: ignore - -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, - grpc_version=None, - rest_version=requests_version, -) - - -class PredictionServiceRestInterceptor: - """Interceptor for PredictionService. - - Interceptors are used to manipulate requests, request metadata, and responses - in arbitrary ways. - Example use cases include: - * Logging - * Verifying requests according to service or custom semantics - * Stripping extraneous information from responses - - These use cases and more can be enabled by injecting an - instance of a custom subclass when constructing the PredictionServiceRestTransport. - - .. code-block:: python - class MyCustomPredictionServiceInterceptor(PredictionServiceRestInterceptor): - def pre_batch_predict(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_batch_predict(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_predict(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_predict(self, response): - logging.log(f"Received response: {response}") - return response - - transport = PredictionServiceRestTransport(interceptor=MyCustomPredictionServiceInterceptor()) - client = PredictionServiceClient(transport=transport) - - - """ - def pre_batch_predict(self, request: prediction_service.BatchPredictRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[prediction_service.BatchPredictRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for batch_predict - - Override in a subclass to manipulate the request or metadata - before they are sent to the PredictionService server. - """ - return request, metadata - - def post_batch_predict(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for batch_predict - - Override in a subclass to manipulate the response - after it is returned by the PredictionService server but before - it is returned to user code. 
- """ - return response - def pre_predict(self, request: prediction_service.PredictRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[prediction_service.PredictRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for predict - - Override in a subclass to manipulate the request or metadata - before they are sent to the PredictionService server. - """ - return request, metadata - - def post_predict(self, response: prediction_service.PredictResponse) -> prediction_service.PredictResponse: - """Post-rpc interceptor for predict - - Override in a subclass to manipulate the response - after it is returned by the PredictionService server but before - it is returned to user code. - """ - return response - - -@dataclasses.dataclass -class PredictionServiceRestStub: - _session: AuthorizedSession - _host: str - _interceptor: PredictionServiceRestInterceptor - - -class PredictionServiceRestTransport(PredictionServiceTransport): - """REST backend transport for PredictionService. - - AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or dash-case, either of those cases is accepted. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends JSON representations of protocol buffers over HTTP/1.1 - - """ - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[ - ], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = 'https', - interceptor: Optional[PredictionServiceRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
- # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) - if maybe_url_match is None: - raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST) - self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or PredictionServiceRestInterceptor() - self._prep_wrapped_messages(client_info) - - @property - def operations_client(self) -> operations_v1.AbstractOperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Only create a new client if we do not already have one. - if self._operations_client is None: - http_options: Dict[str, List[Dict[str, str]]] = { - 'google.longrunning.Operations.CancelOperation': [ - { - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/operations/*}:cancel', - 'body': '*', - }, - ], - 'google.longrunning.Operations.DeleteOperation': [ - { - 'method': 'delete', - 'uri': '/v1/{name=projects/*/locations/*/operations/*}', - }, - ], - 'google.longrunning.Operations.GetOperation': [ - { - 'method': 'get', - 'uri': '/v1/{name=projects/*/locations/*/operations/*}', - }, - ], - 'google.longrunning.Operations.ListOperations': [ - { - 'method': 'get', - 'uri': '/v1/{name=projects/*/locations/*}/operations', - }, - ], - 'google.longrunning.Operations.WaitOperation': [ - { - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/operations/*}:wait', - 'body': '*', - }, - ], - } - - rest_transport = operations_v1.OperationsRestTransport( - host=self._host, - # use the credentials which are saved - credentials=self._credentials, - scopes=self._scopes, - http_options=http_options, - path_prefix="v1") - - self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) - - # Return the client from cache. - return self._operations_client - - class _BatchPredict(PredictionServiceRestStub): - def __hash__(self): - return hash("BatchPredict") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: prediction_service.BatchPredictRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the batch predict method over HTTP. - - Args: - request (~.prediction_service.BatchPredictRequest): - The request object. Request message for - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/models/*}:batchPredict', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_batch_predict(request, metadata) - pb_request = prediction_service.BatchPredictRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_batch_predict(resp) - return resp - - class _Predict(PredictionServiceRestStub): - def __hash__(self): - return hash("Predict") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: prediction_service.PredictRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> prediction_service.PredictResponse: - r"""Call the predict method over HTTP. - - Args: - request (~.prediction_service.PredictRequest): - The request object. Request message for - [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.prediction_service.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1/{name=projects/*/locations/*/models/*}:predict', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_predict(request, metadata) - pb_request = prediction_service.PredictRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = prediction_service.PredictResponse() - pb_resp = prediction_service.PredictResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_predict(resp) - return resp - - @property - def batch_predict(self) -> Callable[ - [prediction_service.BatchPredictRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._BatchPredict(self._session, self._host, self._interceptor) # type: ignore - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - prediction_service.PredictResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._Predict(self._session, self._host, self._interceptor) # type: ignore - - @property - def kind(self) -> str: - return "rest" - - def close(self): - self._session.close() - - -__all__=( - 'PredictionServiceRestTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/__init__.py deleted file mode 100644 index a94ad111..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/__init__.py +++ /dev/null @@ -1,220 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .annotation_payload import ( - AnnotationPayload, -) -from .annotation_spec import ( - AnnotationSpec, -) -from .classification import ( - ClassificationAnnotation, - ClassificationEvaluationMetrics, - ClassificationType, -) -from .data_items import ( - Document, - DocumentDimensions, - ExamplePayload, - Image, - TextSnippet, -) -from .dataset import ( - Dataset, -) -from .detection import ( - BoundingBoxMetricsEntry, - ImageObjectDetectionAnnotation, - ImageObjectDetectionEvaluationMetrics, -) -from .geometry import ( - BoundingPoly, - NormalizedVertex, -) -from .image import ( - ImageClassificationDatasetMetadata, - ImageClassificationModelDeploymentMetadata, - ImageClassificationModelMetadata, - ImageObjectDetectionDatasetMetadata, - ImageObjectDetectionModelDeploymentMetadata, - ImageObjectDetectionModelMetadata, -) -from .io import ( - BatchPredictInputConfig, - BatchPredictOutputConfig, - DocumentInputConfig, - GcsDestination, - GcsSource, - InputConfig, - ModelExportOutputConfig, - OutputConfig, -) -from .model import ( - Model, -) -from .model_evaluation import ( - ModelEvaluation, -) -from .operations import ( - BatchPredictOperationMetadata, - CreateDatasetOperationMetadata, - CreateModelOperationMetadata, - DeleteOperationMetadata, - DeployModelOperationMetadata, - ExportDataOperationMetadata, - ExportModelOperationMetadata, - ImportDataOperationMetadata, - OperationMetadata, - UndeployModelOperationMetadata, -) -from .prediction_service import ( - BatchPredictRequest, - BatchPredictResult, - PredictRequest, - PredictResponse, -) -from .service import ( - CreateDatasetRequest, - CreateModelRequest, - DeleteDatasetRequest, - DeleteModelRequest, - DeployModelRequest, - ExportDataRequest, - ExportModelRequest, - GetAnnotationSpecRequest, - GetDatasetRequest, - GetModelEvaluationRequest, - GetModelRequest, - ImportDataRequest, - ListDatasetsRequest, - ListDatasetsResponse, - ListModelEvaluationsRequest, - ListModelEvaluationsResponse, - ListModelsRequest, - ListModelsResponse, - UndeployModelRequest, - UpdateDatasetRequest, - UpdateModelRequest, -) -from .text import ( - TextClassificationDatasetMetadata, - TextClassificationModelMetadata, - TextExtractionDatasetMetadata, - TextExtractionModelMetadata, - TextSentimentDatasetMetadata, - TextSentimentModelMetadata, -) -from .text_extraction import ( - TextExtractionAnnotation, - TextExtractionEvaluationMetrics, -) -from .text_segment import ( - TextSegment, -) -from .text_sentiment import ( - TextSentimentAnnotation, - TextSentimentEvaluationMetrics, -) -from .translation import ( - TranslationAnnotation, - TranslationDatasetMetadata, - TranslationEvaluationMetrics, - TranslationModelMetadata, -) - -__all__ = ( - 'AnnotationPayload', - 'AnnotationSpec', - 'ClassificationAnnotation', - 'ClassificationEvaluationMetrics', - 'ClassificationType', - 'Document', - 'DocumentDimensions', - 'ExamplePayload', - 'Image', - 'TextSnippet', - 'Dataset', - 'BoundingBoxMetricsEntry', - 'ImageObjectDetectionAnnotation', - 'ImageObjectDetectionEvaluationMetrics', - 'BoundingPoly', - 'NormalizedVertex', - 'ImageClassificationDatasetMetadata', - 'ImageClassificationModelDeploymentMetadata', - 'ImageClassificationModelMetadata', - 'ImageObjectDetectionDatasetMetadata', - 'ImageObjectDetectionModelDeploymentMetadata', - 'ImageObjectDetectionModelMetadata', - 'BatchPredictInputConfig', - 'BatchPredictOutputConfig', - 'DocumentInputConfig', - 'GcsDestination', - 'GcsSource', - 'InputConfig', - 'ModelExportOutputConfig', - 'OutputConfig', - 'Model', 
- 'ModelEvaluation', - 'BatchPredictOperationMetadata', - 'CreateDatasetOperationMetadata', - 'CreateModelOperationMetadata', - 'DeleteOperationMetadata', - 'DeployModelOperationMetadata', - 'ExportDataOperationMetadata', - 'ExportModelOperationMetadata', - 'ImportDataOperationMetadata', - 'OperationMetadata', - 'UndeployModelOperationMetadata', - 'BatchPredictRequest', - 'BatchPredictResult', - 'PredictRequest', - 'PredictResponse', - 'CreateDatasetRequest', - 'CreateModelRequest', - 'DeleteDatasetRequest', - 'DeleteModelRequest', - 'DeployModelRequest', - 'ExportDataRequest', - 'ExportModelRequest', - 'GetAnnotationSpecRequest', - 'GetDatasetRequest', - 'GetModelEvaluationRequest', - 'GetModelRequest', - 'ImportDataRequest', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'UndeployModelRequest', - 'UpdateDatasetRequest', - 'UpdateModelRequest', - 'TextClassificationDatasetMetadata', - 'TextClassificationModelMetadata', - 'TextExtractionDatasetMetadata', - 'TextExtractionModelMetadata', - 'TextSentimentDatasetMetadata', - 'TextSentimentModelMetadata', - 'TextExtractionAnnotation', - 'TextExtractionEvaluationMetrics', - 'TextSegment', - 'TextSentimentAnnotation', - 'TextSentimentEvaluationMetrics', - 'TranslationAnnotation', - 'TranslationDatasetMetadata', - 'TranslationEvaluationMetrics', - 'TranslationModelMetadata', -) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_payload.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_payload.py deleted file mode 100644 index 0164e1be..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_payload.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import classification as gca_classification -from google.cloud.automl_v1.types import detection -from google.cloud.automl_v1.types import text_extraction as gca_text_extraction -from google.cloud.automl_v1.types import text_sentiment as gca_text_sentiment -from google.cloud.automl_v1.types import translation as gca_translation - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'AnnotationPayload', - }, -) - - -class AnnotationPayload(proto.Message): - r"""Contains annotation information that is relevant to AutoML. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - translation (google.cloud.automl_v1.types.TranslationAnnotation): - Annotation details for translation. 
- - This field is a member of `oneof`_ ``detail``. - classification (google.cloud.automl_v1.types.ClassificationAnnotation): - Annotation details for content or image - classification. - - This field is a member of `oneof`_ ``detail``. - image_object_detection (google.cloud.automl_v1.types.ImageObjectDetectionAnnotation): - Annotation details for image object - detection. - - This field is a member of `oneof`_ ``detail``. - text_extraction (google.cloud.automl_v1.types.TextExtractionAnnotation): - Annotation details for text extraction. - - This field is a member of `oneof`_ ``detail``. - text_sentiment (google.cloud.automl_v1.types.TextSentimentAnnotation): - Annotation details for text sentiment. - - This field is a member of `oneof`_ ``detail``. - annotation_spec_id (str): - Output only . The resource ID of the - annotation spec that this annotation pertains - to. The annotation spec comes from either an - ancestor dataset, or the dataset that was used - to train the model in use. - display_name (str): - Output only. The value of - [display_name][google.cloud.automl.v1.AnnotationSpec.display_name] - when the model was trained. Because this field returns a - value at model training time, for different models trained - using the same dataset, the returned value could be - different as model owner could update the ``display_name`` - between any two model training. - """ - - translation: gca_translation.TranslationAnnotation = proto.Field( - proto.MESSAGE, - number=2, - oneof='detail', - message=gca_translation.TranslationAnnotation, - ) - classification: gca_classification.ClassificationAnnotation = proto.Field( - proto.MESSAGE, - number=3, - oneof='detail', - message=gca_classification.ClassificationAnnotation, - ) - image_object_detection: detection.ImageObjectDetectionAnnotation = proto.Field( - proto.MESSAGE, - number=4, - oneof='detail', - message=detection.ImageObjectDetectionAnnotation, - ) - text_extraction: gca_text_extraction.TextExtractionAnnotation = proto.Field( - proto.MESSAGE, - number=6, - oneof='detail', - message=gca_text_extraction.TextExtractionAnnotation, - ) - text_sentiment: gca_text_sentiment.TextSentimentAnnotation = proto.Field( - proto.MESSAGE, - number=7, - oneof='detail', - message=gca_text_sentiment.TextSentimentAnnotation, - ) - annotation_spec_id: str = proto.Field( - proto.STRING, - number=1, - ) - display_name: str = proto.Field( - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_spec.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_spec.py deleted file mode 100644 index 81961c68..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/annotation_spec.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
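A minimal usage sketch (editor's illustration, not part of the deleted sources in this patch; it assumes the published google-cloud-automl package and uses placeholder values) showing how the ``detail`` oneof on AnnotationPayload behaves:

# Illustrative sketch only: construct an AnnotationPayload and observe the oneof.
from google.cloud import automl_v1

payload = automl_v1.AnnotationPayload(
    classification=automl_v1.ClassificationAnnotation(score=0.92),
    annotation_spec_id="1234567890",  # placeholder spec ID
    display_name="daisy",
)

# Setting another member of the ``detail`` oneof clears ``classification``.
payload.translation = automl_v1.TranslationAnnotation()

# The underlying protobuf message reports which member is currently populated.
print(automl_v1.AnnotationPayload.pb(payload).WhichOneof("detail"))  # -> "translation"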
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'AnnotationSpec', - }, -) - - -class AnnotationSpec(proto.Message): - r"""A definition of an annotation spec. - - Attributes: - name (str): - Output only. Resource name of the annotation spec. Form: - 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}' - display_name (str): - Required. The name of the annotation spec to show in the - interface. The name can be up to 32 characters long and must - match the regexp ``[a-zA-Z0-9_]+``. - example_count (int): - Output only. The number of examples in the - parent dataset labeled by the annotation spec. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - display_name: str = proto.Field( - proto.STRING, - number=2, - ) - example_count: int = proto.Field( - proto.INT32, - number=9, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/classification.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/classification.py deleted file mode 100644 index 1885dd52..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/classification.py +++ /dev/null @@ -1,310 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'ClassificationType', - 'ClassificationAnnotation', - 'ClassificationEvaluationMetrics', - }, -) - - -class ClassificationType(proto.Enum): - r"""Type of the classification problem. - - Values: - CLASSIFICATION_TYPE_UNSPECIFIED (0): - An un-set value of this enum. - MULTICLASS (1): - At most one label is allowed per example. - MULTILABEL (2): - Multiple labels are allowed for one example. - """ - CLASSIFICATION_TYPE_UNSPECIFIED = 0 - MULTICLASS = 1 - MULTILABEL = 2 - - -class ClassificationAnnotation(proto.Message): - r"""Contains annotation details specific to classification. - - Attributes: - score (float): - Output only. A confidence estimate between - 0.0 and 1.0. A higher value means greater - confidence that the annotation is positive. If a - user approves an annotation as negative or - positive, the score value remains unchanged. If - a user creates an annotation, the score is 0 for - negative or 1 for positive. - """ - - score: float = proto.Field( - proto.FLOAT, - number=1, - ) - - -class ClassificationEvaluationMetrics(proto.Message): - r"""Model evaluation metrics for classification problems. Note: For - Video Classification this metrics only describe quality of the Video - Classification predictions of "segment_classification" type. - - Attributes: - au_prc (float): - Output only. The Area Under Precision-Recall - Curve metric. 
Micro-averaged for the overall - evaluation. - au_roc (float): - Output only. The Area Under Receiver - Operating Characteristic curve metric. - Micro-averaged for the overall evaluation. - log_loss (float): - Output only. The Log Loss metric. - confidence_metrics_entry (MutableSequence[google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]): - Output only. Metrics for each confidence_threshold in - 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and - position_threshold = INT32_MAX_VALUE. ROC and - precision-recall curves, and other aggregated metrics are - derived from them. The confidence metrics entries may also - be supplied for additional values of position_threshold, but - from these no aggregated metrics are computed. - confusion_matrix (google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfusionMatrix): - Output only. Confusion matrix of the - evaluation. Only set for MULTICLASS - classification problems where number of labels - is no more than 10. - Only set for model level evaluation, not for - evaluation per label. - annotation_spec_id (MutableSequence[str]): - Output only. The annotation spec ids used for - this evaluation. - """ - - class ConfidenceMetricsEntry(proto.Message): - r"""Metrics for a single confidence threshold. - - Attributes: - confidence_threshold (float): - Output only. Metrics are computed with an - assumption that the model never returns - predictions with score lower than this value. - position_threshold (int): - Output only. Metrics are computed with an assumption that - the model always returns at most this many predictions - (ordered by their score, descendingly), but they all still - need to meet the confidence_threshold. - recall (float): - Output only. Recall (True Positive Rate) for - the given confidence threshold. - precision (float): - Output only. Precision for the given - confidence threshold. - false_positive_rate (float): - Output only. False Positive Rate for the - given confidence threshold. - f1_score (float): - Output only. The harmonic mean of recall and - precision. - recall_at1 (float): - Output only. The Recall (True Positive Rate) - when only considering the label that has the - highest prediction score and not below the - confidence threshold for each example. - precision_at1 (float): - Output only. The precision when only - considering the label that has the highest - prediction score and not below the confidence - threshold for each example. - false_positive_rate_at1 (float): - Output only. The False Positive Rate when - only considering the label that has the highest - prediction score and not below the confidence - threshold for each example. - f1_score_at1 (float): - Output only. The harmonic mean of - [recall_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] - and - [precision_at1][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1]. - true_positive_count (int): - Output only. The number of model created - labels that match a ground truth label. - false_positive_count (int): - Output only. The number of model created - labels that do not match a ground truth label. - false_negative_count (int): - Output only. The number of ground truth - labels that are not matched by a model created - label. - true_negative_count (int): - Output only. The number of labels that were - not created by the model, but if they would, - they would not match a ground truth label. 
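A short sketch (editor's illustration, not part of the deleted sources; the ``metrics`` value is assumed to come from a ModelEvaluation returned by AutoMl.GetModelEvaluation) showing how the ConfidenceMetricsEntry list above can be used to pick an operating threshold:

# Illustrative sketch only: choose the lowest threshold meeting a precision target.
from typing import Optional

from google.cloud import automl_v1


def lowest_threshold_for_precision(
    metrics: automl_v1.ClassificationEvaluationMetrics,
    target_precision: float = 0.90,
) -> Optional[float]:
    """Return the smallest confidence threshold whose precision meets the target."""
    candidates = [
        entry.confidence_threshold
        for entry in metrics.confidence_metrics_entry
        if entry.precision >= target_precision
    ]
    return min(candidates) if candidates else None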
- """ - - confidence_threshold: float = proto.Field( - proto.FLOAT, - number=1, - ) - position_threshold: int = proto.Field( - proto.INT32, - number=14, - ) - recall: float = proto.Field( - proto.FLOAT, - number=2, - ) - precision: float = proto.Field( - proto.FLOAT, - number=3, - ) - false_positive_rate: float = proto.Field( - proto.FLOAT, - number=8, - ) - f1_score: float = proto.Field( - proto.FLOAT, - number=4, - ) - recall_at1: float = proto.Field( - proto.FLOAT, - number=5, - ) - precision_at1: float = proto.Field( - proto.FLOAT, - number=6, - ) - false_positive_rate_at1: float = proto.Field( - proto.FLOAT, - number=9, - ) - f1_score_at1: float = proto.Field( - proto.FLOAT, - number=7, - ) - true_positive_count: int = proto.Field( - proto.INT64, - number=10, - ) - false_positive_count: int = proto.Field( - proto.INT64, - number=11, - ) - false_negative_count: int = proto.Field( - proto.INT64, - number=12, - ) - true_negative_count: int = proto.Field( - proto.INT64, - number=13, - ) - - class ConfusionMatrix(proto.Message): - r"""Confusion matrix of the model running the classification. - - Attributes: - annotation_spec_id (MutableSequence[str]): - Output only. IDs of the annotation specs used in the - confusion matrix. For Tables CLASSIFICATION - [prediction_type][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type] - only list of [annotation_spec_display_name-s][] is - populated. - display_name (MutableSequence[str]): - Output only. Display name of the annotation specs used in - the confusion matrix, as they were at the moment of the - evaluation. For Tables CLASSIFICATION - [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type], - distinct values of the target column at the moment of the - model evaluation are populated here. - row (MutableSequence[google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfusionMatrix.Row]): - Output only. Rows in the confusion matrix. The number of - rows is equal to the size of ``annotation_spec_id``. - ``row[i].example_count[j]`` is the number of examples that - have ground truth of the ``annotation_spec_id[i]`` and are - predicted as ``annotation_spec_id[j]`` by the model being - evaluated. - """ - - class Row(proto.Message): - r"""Output only. A row in the confusion matrix. - - Attributes: - example_count (MutableSequence[int]): - Output only. Value of the specific cell in the confusion - matrix. The number of values each row has (i.e. the length - of the row) is equal to the length of the - ``annotation_spec_id`` field or, if that one is not - populated, length of the - [display_name][google.cloud.automl.v1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] - field. 
- """ - - example_count: MutableSequence[int] = proto.RepeatedField( - proto.INT32, - number=1, - ) - - annotation_spec_id: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=1, - ) - display_name: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=3, - ) - row: MutableSequence['ClassificationEvaluationMetrics.ConfusionMatrix.Row'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='ClassificationEvaluationMetrics.ConfusionMatrix.Row', - ) - - au_prc: float = proto.Field( - proto.FLOAT, - number=1, - ) - au_roc: float = proto.Field( - proto.FLOAT, - number=6, - ) - log_loss: float = proto.Field( - proto.FLOAT, - number=7, - ) - confidence_metrics_entry: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=ConfidenceMetricsEntry, - ) - confusion_matrix: ConfusionMatrix = proto.Field( - proto.MESSAGE, - number=4, - message=ConfusionMatrix, - ) - annotation_spec_id: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/data_items.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/data_items.py deleted file mode 100644 index 0d71bb07..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/data_items.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import geometry -from google.cloud.automl_v1.types import io -from google.cloud.automl_v1.types import text_segment as gca_text_segment - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'Image', - 'TextSnippet', - 'DocumentDimensions', - 'Document', - 'ExamplePayload', - }, -) - - -class Image(proto.Message): - r"""A representation of an image. - Only images up to 30MB in size are supported. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - image_bytes (bytes): - Image content represented as a stream of bytes. Note: As - with all ``bytes`` fields, protobuffers use a pure binary - representation, whereas JSON representations use base64. - - This field is a member of `oneof`_ ``data``. - thumbnail_uri (str): - Output only. HTTP URI to the thumbnail image. - """ - - image_bytes: bytes = proto.Field( - proto.BYTES, - number=1, - oneof='data', - ) - thumbnail_uri: str = proto.Field( - proto.STRING, - number=4, - ) - - -class TextSnippet(proto.Message): - r"""A representation of a text snippet. - - Attributes: - content (str): - Required. The content of the text snippet as - a string. Up to 250000 characters long. - mime_type (str): - Optional. The format of - [content][google.cloud.automl.v1.TextSnippet.content]. 
- Currently the only two allowed values are "text/html" and - "text/plain". If left blank, the format is automatically - determined from the type of the uploaded - [content][google.cloud.automl.v1.TextSnippet.content]. - content_uri (str): - Output only. HTTP URI where you can download - the content. - """ - - content: str = proto.Field( - proto.STRING, - number=1, - ) - mime_type: str = proto.Field( - proto.STRING, - number=2, - ) - content_uri: str = proto.Field( - proto.STRING, - number=4, - ) - - -class DocumentDimensions(proto.Message): - r"""Message that describes dimension of a document. - - Attributes: - unit (google.cloud.automl_v1.types.DocumentDimensions.DocumentDimensionUnit): - Unit of the dimension. - width (float): - Width value of the document, works together - with the unit. - height (float): - Height value of the document, works together - with the unit. - """ - class DocumentDimensionUnit(proto.Enum): - r"""Unit of the document dimension. - - Values: - DOCUMENT_DIMENSION_UNIT_UNSPECIFIED (0): - Should not be used. - INCH (1): - Document dimension is measured in inches. - CENTIMETER (2): - Document dimension is measured in - centimeters. - POINT (3): - Document dimension is measured in points. 72 - points = 1 inch. - """ - DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0 - INCH = 1 - CENTIMETER = 2 - POINT = 3 - - unit: DocumentDimensionUnit = proto.Field( - proto.ENUM, - number=1, - enum=DocumentDimensionUnit, - ) - width: float = proto.Field( - proto.FLOAT, - number=2, - ) - height: float = proto.Field( - proto.FLOAT, - number=3, - ) - - -class Document(proto.Message): - r"""A structured text document e.g. a PDF. - - Attributes: - input_config (google.cloud.automl_v1.types.DocumentInputConfig): - An input config specifying the content of the - document. - document_text (google.cloud.automl_v1.types.TextSnippet): - The plain text version of this document. - layout (MutableSequence[google.cloud.automl_v1.types.Document.Layout]): - Describes the layout of the document. Sorted by - [page_number][]. - document_dimensions (google.cloud.automl_v1.types.DocumentDimensions): - The dimensions of the page in the document. - page_count (int): - Number of pages in the document. - """ - - class Layout(proto.Message): - r"""Describes the layout information of a - [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] - in the document. - - Attributes: - text_segment (google.cloud.automl_v1.types.TextSegment): - Text Segment that represents a segment in - [document_text][google.cloud.automl.v1p1beta.Document.document_text]. - page_number (int): - Page number of the - [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] - in the original document, starts from 1. - bounding_poly (google.cloud.automl_v1.types.BoundingPoly): - The position of the - [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] - in the page. Contains exactly 4 - [normalized_vertices][google.cloud.automl.v1p1beta.BoundingPoly.normalized_vertices] - and they are connected by edges in the order provided, which - will represent a rectangle parallel to the frame. The - [NormalizedVertex-s][google.cloud.automl.v1p1beta.NormalizedVertex] - are relative to the page. Coordinates are based on top-left - as point (0,0). - text_segment_type (google.cloud.automl_v1.types.Document.Layout.TextSegmentType): - The type of the - [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] - in document. 
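A brief sketch (editor's illustration, not part of the deleted sources; URIs and bytes are placeholders) showing how the TextSnippet and Document types defined in this module are wrapped in ExamplePayload, the ``payload`` oneof used for example data:

# Illustrative sketch only: build the three kinds of example payloads.
from google.cloud import automl_v1

text_example = automl_v1.ExamplePayload(
    text_snippet=automl_v1.TextSnippet(
        content="A mild fever and a sore throat.",
        mime_type="text/plain",
    ),
)

image_example = automl_v1.ExamplePayload(
    image=automl_v1.Image(image_bytes=b"..."),  # raw image bytes, up to 30MB
)

document_example = automl_v1.ExamplePayload(
    document=automl_v1.Document(
        input_config=automl_v1.DocumentInputConfig(
            gcs_source=automl_v1.GcsSource(
                input_uris=["gs://placeholder-bucket/contract.pdf"],  # placeholder URI
            ),
        ),
    ),
)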
- """ - class TextSegmentType(proto.Enum): - r"""The type of TextSegment in the context of the original - document. - - Values: - TEXT_SEGMENT_TYPE_UNSPECIFIED (0): - Should not be used. - TOKEN (1): - The text segment is a token. e.g. word. - PARAGRAPH (2): - The text segment is a paragraph. - FORM_FIELD (3): - The text segment is a form field. - FORM_FIELD_NAME (4): - The text segment is the name part of a form field. It will - be treated as child of another FORM_FIELD TextSegment if its - span is subspan of another TextSegment with type FORM_FIELD. - FORM_FIELD_CONTENTS (5): - The text segment is the text content part of a form field. - It will be treated as child of another FORM_FIELD - TextSegment if its span is subspan of another TextSegment - with type FORM_FIELD. - TABLE (6): - The text segment is a whole table, including - headers, and all rows. - TABLE_HEADER (7): - The text segment is a table's headers. It - will be treated as child of another TABLE - TextSegment if its span is subspan of another - TextSegment with type TABLE. - TABLE_ROW (8): - The text segment is a row in table. It will - be treated as child of another TABLE TextSegment - if its span is subspan of another TextSegment - with type TABLE. - TABLE_CELL (9): - The text segment is a cell in table. It will be treated as - child of another TABLE_ROW TextSegment if its span is - subspan of another TextSegment with type TABLE_ROW. - """ - TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 - TOKEN = 1 - PARAGRAPH = 2 - FORM_FIELD = 3 - FORM_FIELD_NAME = 4 - FORM_FIELD_CONTENTS = 5 - TABLE = 6 - TABLE_HEADER = 7 - TABLE_ROW = 8 - TABLE_CELL = 9 - - text_segment: gca_text_segment.TextSegment = proto.Field( - proto.MESSAGE, - number=1, - message=gca_text_segment.TextSegment, - ) - page_number: int = proto.Field( - proto.INT32, - number=2, - ) - bounding_poly: geometry.BoundingPoly = proto.Field( - proto.MESSAGE, - number=3, - message=geometry.BoundingPoly, - ) - text_segment_type: 'Document.Layout.TextSegmentType' = proto.Field( - proto.ENUM, - number=4, - enum='Document.Layout.TextSegmentType', - ) - - input_config: io.DocumentInputConfig = proto.Field( - proto.MESSAGE, - number=1, - message=io.DocumentInputConfig, - ) - document_text: 'TextSnippet' = proto.Field( - proto.MESSAGE, - number=2, - message='TextSnippet', - ) - layout: MutableSequence[Layout] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Layout, - ) - document_dimensions: 'DocumentDimensions' = proto.Field( - proto.MESSAGE, - number=4, - message='DocumentDimensions', - ) - page_count: int = proto.Field( - proto.INT32, - number=5, - ) - - -class ExamplePayload(proto.Message): - r"""Example data used for training or prediction. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - image (google.cloud.automl_v1.types.Image): - Example image. - - This field is a member of `oneof`_ ``payload``. - text_snippet (google.cloud.automl_v1.types.TextSnippet): - Example text. - - This field is a member of `oneof`_ ``payload``. - document (google.cloud.automl_v1.types.Document): - Example document. - - This field is a member of `oneof`_ ``payload``. 
- """ - - image: 'Image' = proto.Field( - proto.MESSAGE, - number=1, - oneof='payload', - message='Image', - ) - text_snippet: 'TextSnippet' = proto.Field( - proto.MESSAGE, - number=2, - oneof='payload', - message='TextSnippet', - ) - document: 'Document' = proto.Field( - proto.MESSAGE, - number=4, - oneof='payload', - message='Document', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/dataset.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/dataset.py deleted file mode 100644 index 17dc3e7e..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/dataset.py +++ /dev/null @@ -1,181 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import image -from google.cloud.automl_v1.types import text -from google.cloud.automl_v1.types import translation -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'Dataset', - }, -) - - -class Dataset(proto.Message): - r"""A workspace for solving a single, particular machine learning - (ML) problem. A workspace contains examples that may be - annotated. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - translation_dataset_metadata (google.cloud.automl_v1.types.TranslationDatasetMetadata): - Metadata for a dataset used for translation. - - This field is a member of `oneof`_ ``dataset_metadata``. - image_classification_dataset_metadata (google.cloud.automl_v1.types.ImageClassificationDatasetMetadata): - Metadata for a dataset used for image - classification. - - This field is a member of `oneof`_ ``dataset_metadata``. - text_classification_dataset_metadata (google.cloud.automl_v1.types.TextClassificationDatasetMetadata): - Metadata for a dataset used for text - classification. - - This field is a member of `oneof`_ ``dataset_metadata``. - image_object_detection_dataset_metadata (google.cloud.automl_v1.types.ImageObjectDetectionDatasetMetadata): - Metadata for a dataset used for image object - detection. - - This field is a member of `oneof`_ ``dataset_metadata``. - text_extraction_dataset_metadata (google.cloud.automl_v1.types.TextExtractionDatasetMetadata): - Metadata for a dataset used for text - extraction. - - This field is a member of `oneof`_ ``dataset_metadata``. - text_sentiment_dataset_metadata (google.cloud.automl_v1.types.TextSentimentDatasetMetadata): - Metadata for a dataset used for text - sentiment. - - This field is a member of `oneof`_ ``dataset_metadata``. 
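A minimal sketch (editor's illustration, not part of the deleted sources; project, location, and display name are placeholders) showing that a Dataset carries exactly one ``*_dataset_metadata`` member and is created through the AutoMlClient exported by this package:

# Illustrative sketch only: create an image-classification dataset.
from google.cloud import automl_v1

dataset = automl_v1.Dataset(
    display_name="flower_photos",
    image_classification_dataset_metadata=automl_v1.ImageClassificationDatasetMetadata(
        classification_type=automl_v1.ClassificationType.MULTICLASS,
    ),
)

client = automl_v1.AutoMlClient()
operation = client.create_dataset(
    parent="projects/placeholder-project/locations/us-central1",
    dataset=dataset,
)
created = operation.result()  # CreateDataset is a long-running operation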
- name (str): - Output only. The resource name of the dataset. Form: - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`` - display_name (str): - Required. The name of the dataset to show in the interface. - The name can be up to 32 characters long and can consist - only of ASCII Latin letters A-Z and a-z, underscores (_), - and ASCII digits 0-9. - description (str): - User-provided description of the dataset. The - description can be up to 25000 characters long. - example_count (int): - Output only. The number of examples in the - dataset. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this dataset was - created. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (MutableMapping[str, str]): - Optional. The labels with user-defined - metadata to organize your dataset. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. Label values are optional. Label - keys must start with a letter. - - See https://goo.gl/xmQnxf for more information - on and examples of labels. - """ - - translation_dataset_metadata: translation.TranslationDatasetMetadata = proto.Field( - proto.MESSAGE, - number=23, - oneof='dataset_metadata', - message=translation.TranslationDatasetMetadata, - ) - image_classification_dataset_metadata: image.ImageClassificationDatasetMetadata = proto.Field( - proto.MESSAGE, - number=24, - oneof='dataset_metadata', - message=image.ImageClassificationDatasetMetadata, - ) - text_classification_dataset_metadata: text.TextClassificationDatasetMetadata = proto.Field( - proto.MESSAGE, - number=25, - oneof='dataset_metadata', - message=text.TextClassificationDatasetMetadata, - ) - image_object_detection_dataset_metadata: image.ImageObjectDetectionDatasetMetadata = proto.Field( - proto.MESSAGE, - number=26, - oneof='dataset_metadata', - message=image.ImageObjectDetectionDatasetMetadata, - ) - text_extraction_dataset_metadata: text.TextExtractionDatasetMetadata = proto.Field( - proto.MESSAGE, - number=28, - oneof='dataset_metadata', - message=text.TextExtractionDatasetMetadata, - ) - text_sentiment_dataset_metadata: text.TextSentimentDatasetMetadata = proto.Field( - proto.MESSAGE, - number=30, - oneof='dataset_metadata', - message=text.TextSentimentDatasetMetadata, - ) - name: str = proto.Field( - proto.STRING, - number=1, - ) - display_name: str = proto.Field( - proto.STRING, - number=2, - ) - description: str = proto.Field( - proto.STRING, - number=3, - ) - example_count: int = proto.Field( - proto.INT32, - number=21, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - etag: str = proto.Field( - proto.STRING, - number=17, - ) - labels: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=39, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/detection.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/detection.py deleted file mode 100644 index 90b2028d..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/detection.py +++ /dev/null @@ -1,165 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance 
with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import geometry - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'ImageObjectDetectionAnnotation', - 'BoundingBoxMetricsEntry', - 'ImageObjectDetectionEvaluationMetrics', - }, -) - - -class ImageObjectDetectionAnnotation(proto.Message): - r"""Annotation details for image object detection. - - Attributes: - bounding_box (google.cloud.automl_v1.types.BoundingPoly): - Output only. The rectangle representing the - object location. - score (float): - Output only. The confidence that this annotation is positive - for the parent example, value in [0, 1], higher means higher - positivity confidence. - """ - - bounding_box: geometry.BoundingPoly = proto.Field( - proto.MESSAGE, - number=1, - message=geometry.BoundingPoly, - ) - score: float = proto.Field( - proto.FLOAT, - number=2, - ) - - -class BoundingBoxMetricsEntry(proto.Message): - r"""Bounding box matching model metrics for a single - intersection-over-union threshold and multiple label match - confidence thresholds. - - Attributes: - iou_threshold (float): - Output only. The intersection-over-union - threshold value used to compute this metrics - entry. - mean_average_precision (float): - Output only. The mean average precision, most often close to - au_prc. - confidence_metrics_entries (MutableSequence[google.cloud.automl_v1.types.BoundingBoxMetricsEntry.ConfidenceMetricsEntry]): - Output only. Metrics for each label-match - confidence_threshold from - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall - curve is derived from them. - """ - - class ConfidenceMetricsEntry(proto.Message): - r"""Metrics for a single confidence threshold. - - Attributes: - confidence_threshold (float): - Output only. The confidence threshold value - used to compute the metrics. - recall (float): - Output only. Recall under the given - confidence threshold. - precision (float): - Output only. Precision under the given - confidence threshold. - f1_score (float): - Output only. The harmonic mean of recall and - precision. - """ - - confidence_threshold: float = proto.Field( - proto.FLOAT, - number=1, - ) - recall: float = proto.Field( - proto.FLOAT, - number=2, - ) - precision: float = proto.Field( - proto.FLOAT, - number=3, - ) - f1_score: float = proto.Field( - proto.FLOAT, - number=4, - ) - - iou_threshold: float = proto.Field( - proto.FLOAT, - number=1, - ) - mean_average_precision: float = proto.Field( - proto.FLOAT, - number=2, - ) - confidence_metrics_entries: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=ConfidenceMetricsEntry, - ) - - -class ImageObjectDetectionEvaluationMetrics(proto.Message): - r"""Model evaluation metrics for image object detection problems. - Evaluates prediction quality of labeled bounding boxes. - - Attributes: - evaluated_bounding_box_count (int): - Output only. The total number of bounding - boxes (i.e. 
summed over all images) the ground - truth used to create this evaluation had. - bounding_box_metrics_entries (MutableSequence[google.cloud.automl_v1.types.BoundingBoxMetricsEntry]): - Output only. The bounding boxes match metrics - for each Intersection-over-union threshold - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each - label confidence threshold - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. - bounding_box_mean_average_precision (float): - Output only. The single metric for bounding boxes - evaluation: the mean_average_precision averaged over all - bounding_box_metrics_entries. - """ - - evaluated_bounding_box_count: int = proto.Field( - proto.INT32, - number=1, - ) - bounding_box_metrics_entries: MutableSequence['BoundingBoxMetricsEntry'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='BoundingBoxMetricsEntry', - ) - bounding_box_mean_average_precision: float = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/geometry.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/geometry.py deleted file mode 100644 index f9c36025..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/geometry.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'NormalizedVertex', - 'BoundingPoly', - }, -) - - -class NormalizedVertex(proto.Message): - r"""A vertex represents a 2D point in the image. - The normalized vertex coordinates are between 0 to 1 fractions - relative to the original plane (image, video). E.g. if the plane - (e.g. whole image) would have size 10 x 20 then a point with - normalized coordinates (0.1, 0.3) would be at the position (1, - 6) on that plane. - - Attributes: - x (float): - Required. Horizontal coordinate. - y (float): - Required. Vertical coordinate. - """ - - x: float = proto.Field( - proto.FLOAT, - number=1, - ) - y: float = proto.Field( - proto.FLOAT, - number=2, - ) - - -class BoundingPoly(proto.Message): - r"""A bounding polygon of a detected object on a plane. On output both - vertices and normalized_vertices are provided. The polygon is formed - by connecting vertices in the order they are listed. - - Attributes: - normalized_vertices (MutableSequence[google.cloud.automl_v1.types.NormalizedVertex]): - Output only . The bounding polygon normalized - vertices. 
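A small sketch (editor's illustration, not part of the deleted sources) of how a BoundingPoly is built from NormalizedVertex points, whose coordinates are fractions in [0, 1] relative to the image; the rectangle below covers the centre quarter of an image:

# Illustrative sketch only: a rectangular bounding polygon.
from google.cloud import automl_v1

box = automl_v1.BoundingPoly(
    normalized_vertices=[
        automl_v1.NormalizedVertex(x=0.25, y=0.25),
        automl_v1.NormalizedVertex(x=0.75, y=0.25),
        automl_v1.NormalizedVertex(x=0.75, y=0.75),
        automl_v1.NormalizedVertex(x=0.25, y=0.75),
    ],
)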
- """ - - normalized_vertices: MutableSequence['NormalizedVertex'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='NormalizedVertex', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/image.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/image.py deleted file mode 100644 index 522af62f..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/image.py +++ /dev/null @@ -1,318 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import classification - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'ImageClassificationDatasetMetadata', - 'ImageObjectDetectionDatasetMetadata', - 'ImageClassificationModelMetadata', - 'ImageObjectDetectionModelMetadata', - 'ImageClassificationModelDeploymentMetadata', - 'ImageObjectDetectionModelDeploymentMetadata', - }, -) - - -class ImageClassificationDatasetMetadata(proto.Message): - r"""Dataset metadata that is specific to image classification. - - Attributes: - classification_type (google.cloud.automl_v1.types.ClassificationType): - Required. Type of the classification problem. - """ - - classification_type: classification.ClassificationType = proto.Field( - proto.ENUM, - number=1, - enum=classification.ClassificationType, - ) - - -class ImageObjectDetectionDatasetMetadata(proto.Message): - r"""Dataset metadata specific to image object detection. - """ - - -class ImageClassificationModelMetadata(proto.Message): - r"""Model metadata for image classification. - - Attributes: - base_model_id (str): - Optional. The ID of the ``base`` model. If it is specified, - the new model will be created based on the ``base`` model. - Otherwise, the new model will be created from scratch. The - ``base`` model must be in the same ``project`` and - ``location`` as the new model to create, and have the same - ``model_type``. - train_budget_milli_node_hours (int): - Optional. The train budget of creating this model, expressed - in milli node hours i.e. 1,000 value in this field means 1 - node hour. The actual ``train_cost`` will be equal or less - than this value. If further model training ceases to provide - any improvements, it will stop without using full budget and - the stop_reason will be ``MODEL_CONVERGED``. Note, node_hour - = actual_hour \* number_of_nodes_invovled. For model type - ``cloud``\ (default), the train budget must be between 8,000 - and 800,000 milli node hours, inclusive. The default value - is 192, 000 which represents one day in wall time. 
For model - type ``mobile-low-latency-1``, ``mobile-versatile-1``, - ``mobile-high-accuracy-1``, - ``mobile-core-ml-low-latency-1``, - ``mobile-core-ml-versatile-1``, - ``mobile-core-ml-high-accuracy-1``, the train budget must be - between 1,000 and 100,000 milli node hours, inclusive. The - default value is 24, 000 which represents one day in wall - time. - train_cost_milli_node_hours (int): - Output only. The actual train cost of - creating this model, expressed in milli node - hours, i.e. 1,000 value in this field means 1 - node hour. Guaranteed to not exceed the train - budget. - stop_reason (str): - Output only. The reason that this create model operation - stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. - model_type (str): - Optional. Type of the model. The available values are: - - - ``cloud`` - Model to be used via prediction calls to - AutoML API. This is the default value. - - ``mobile-low-latency-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. Expected to have low latency, but may have - lower prediction quality than other models. - - ``mobile-versatile-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. - - ``mobile-high-accuracy-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. Expected to have a higher latency, but should - also have a higher prediction quality than other models. - - ``mobile-core-ml-low-latency-1`` - A model that, in - addition to providing prediction via AutoML API, can also - be exported (see - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) - and used on a mobile device with Core ML afterwards. - Expected to have low latency, but may have lower - prediction quality than other models. - - ``mobile-core-ml-versatile-1`` - A model that, in - addition to providing prediction via AutoML API, can also - be exported (see - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) - and used on a mobile device with Core ML afterwards. - - ``mobile-core-ml-high-accuracy-1`` - A model that, in - addition to providing prediction via AutoML API, can also - be exported (see - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) - and used on a mobile device with Core ML afterwards. - Expected to have a higher latency, but should also have a - higher prediction quality than other models. - node_qps (float): - Output only. An approximate number of online - prediction QPS that can be supported by this - model per each node on which it is deployed. - node_count (int): - Output only. The number of nodes this model is deployed on. - A node is an abstraction of a machine resource, which can - handle online prediction QPS as given in the node_qps field. 
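A short sketch (editor's illustration, not part of the deleted sources; the model type and dataset ID are placeholders) making the milli-node-hour arithmetic concrete: 1,000 in the budget field equals one node hour, so 24,000 requests 24 node hours:

# Illustrative sketch only: image-classification model metadata with a budget.
from google.cloud import automl_v1

metadata = automl_v1.ImageClassificationModelMetadata(
    model_type="mobile-low-latency-1",
    train_budget_milli_node_hours=24_000,  # 24 node hours
)

model = automl_v1.Model(
    display_name="flowers_edge_model",
    dataset_id="ICN0000000000000000000",  # placeholder dataset ID
    image_classification_model_metadata=metadata,
)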
- """ - - base_model_id: str = proto.Field( - proto.STRING, - number=1, - ) - train_budget_milli_node_hours: int = proto.Field( - proto.INT64, - number=16, - ) - train_cost_milli_node_hours: int = proto.Field( - proto.INT64, - number=17, - ) - stop_reason: str = proto.Field( - proto.STRING, - number=5, - ) - model_type: str = proto.Field( - proto.STRING, - number=7, - ) - node_qps: float = proto.Field( - proto.DOUBLE, - number=13, - ) - node_count: int = proto.Field( - proto.INT64, - number=14, - ) - - -class ImageObjectDetectionModelMetadata(proto.Message): - r"""Model metadata specific to image object detection. - - Attributes: - model_type (str): - Optional. Type of the model. The available values are: - - - ``cloud-high-accuracy-1`` - (default) A model to be used - via prediction calls to AutoML API. Expected to have a - higher latency, but should also have a higher prediction - quality than other models. - - ``cloud-low-latency-1`` - A model to be used via - prediction calls to AutoML API. Expected to have low - latency, but may have lower prediction quality than other - models. - - ``mobile-low-latency-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. Expected to have low latency, but may have - lower prediction quality than other models. - - ``mobile-versatile-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. - - ``mobile-high-accuracy-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. Expected to have a higher latency, but should - also have a higher prediction quality than other models. - node_count (int): - Output only. The number of nodes this model is deployed on. - A node is an abstraction of a machine resource, which can - handle online prediction QPS as given in the qps_per_node - field. - node_qps (float): - Output only. An approximate number of online - prediction QPS that can be supported by this - model per each node on which it is deployed. - stop_reason (str): - Output only. The reason that this create model operation - stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. - train_budget_milli_node_hours (int): - Optional. The train budget of creating this model, expressed - in milli node hours i.e. 1,000 value in this field means 1 - node hour. The actual ``train_cost`` will be equal or less - than this value. If further model training ceases to provide - any improvements, it will stop without using full budget and - the stop_reason will be ``MODEL_CONVERGED``. Note, node_hour - = actual_hour \* number_of_nodes_invovled. For model type - ``cloud-high-accuracy-1``\ (default) and - ``cloud-low-latency-1``, the train budget must be between - 20,000 and 900,000 milli node hours, inclusive. The default - value is 216, 000 which represents one day in wall time. 
For - model type ``mobile-low-latency-1``, ``mobile-versatile-1``, - ``mobile-high-accuracy-1``, - ``mobile-core-ml-low-latency-1``, - ``mobile-core-ml-versatile-1``, - ``mobile-core-ml-high-accuracy-1``, the train budget must be - between 1,000 and 100,000 milli node hours, inclusive. The - default value is 24, 000 which represents one day in wall - time. - train_cost_milli_node_hours (int): - Output only. The actual train cost of - creating this model, expressed in milli node - hours, i.e. 1,000 value in this field means 1 - node hour. Guaranteed to not exceed the train - budget. - """ - - model_type: str = proto.Field( - proto.STRING, - number=1, - ) - node_count: int = proto.Field( - proto.INT64, - number=3, - ) - node_qps: float = proto.Field( - proto.DOUBLE, - number=4, - ) - stop_reason: str = proto.Field( - proto.STRING, - number=5, - ) - train_budget_milli_node_hours: int = proto.Field( - proto.INT64, - number=6, - ) - train_cost_milli_node_hours: int = proto.Field( - proto.INT64, - number=7, - ) - - -class ImageClassificationModelDeploymentMetadata(proto.Message): - r"""Model deployment metadata specific to Image Classification. - - Attributes: - node_count (int): - Input only. The number of nodes to deploy the model on. A - node is an abstraction of a machine resource, which can - handle online prediction QPS as given in the model's - [node_qps][google.cloud.automl.v1.ImageClassificationModelMetadata.node_qps]. - Must be between 1 and 100, inclusive on both ends. - """ - - node_count: int = proto.Field( - proto.INT64, - number=1, - ) - - -class ImageObjectDetectionModelDeploymentMetadata(proto.Message): - r"""Model deployment metadata specific to Image Object Detection. - - Attributes: - node_count (int): - Input only. The number of nodes to deploy the model on. A - node is an abstraction of a machine resource, which can - handle online prediction QPS as given in the model's - [qps_per_node][google.cloud.automl.v1.ImageObjectDetectionModelMetadata.qps_per_node]. - Must be between 1 and 100, inclusive on both ends. - """ - - node_count: int = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/io.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/io.py deleted file mode 100644 index 72e0972d..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/io.py +++ /dev/null @@ -1,1572 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'InputConfig', - 'BatchPredictInputConfig', - 'DocumentInputConfig', - 'OutputConfig', - 'BatchPredictOutputConfig', - 'ModelExportOutputConfig', - 'GcsSource', - 'GcsDestination', - }, -) - - -class InputConfig(proto.Message): - r"""Input configuration for - [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] - action. - - The format of input depends on dataset_metadata the Dataset into - which the import is happening has. As input source the - [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is - expected, unless specified otherwise. Additionally any input .CSV - file by itself must be 100MB or smaller, unless specified otherwise. - If an "example" file (that is, image, video etc.) with identical - content (even if it had different ``GCS_FILE_PATH``) is mentioned - multiple times, then its label, bounding boxes etc. are appended. - The same file should be always provided with the same ``ML_USE`` and - ``GCS_FILE_PATH``, if it is not, then these values are - nondeterministically selected from the given ones. - - The formats are represented in EBNF with commas being literal and - with non-terminal symbols defined near the end of this comment. The - formats are: - - .. raw:: html - -

-    AutoML Vision
-
-    Classification
- - See `Preparing your training - data `__ for - more information. - - CSV file(s) with each line in format: - - :: - - ML_USE,GCS_FILE_PATH,LABEL,LABEL,... - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They - are Automatically divided into train and test data. 80% for - training and 20% for testing. - - - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image - of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, - .WEBP, .BMP, .TIFF, .ICO. - - - ``LABEL`` - A label that identifies the object in the image. - - For the ``MULTICLASS`` classification type, at most one ``LABEL`` is - allowed per image. If an image has not yet been labeled, then it - should be mentioned just once with no ``LABEL``. - - Some sample rows: - - :: - - TRAIN,gs://folder/image1.jpg,daisy - TEST,gs://folder/image2.jpg,dandelion,tulip,rose - UNASSIGNED,gs://folder/image3.jpg,daisy - UNASSIGNED,gs://folder/image4.jpg - - .. raw:: html - -
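A minimal sketch (editor's illustration, not part of the deleted sources; the bucket, project, location, and dataset ID are placeholders) of importing a classification CSV in the format shown above, using the InputConfig and GcsSource types defined in this module:

# Illustrative sketch only: import a CSV from Cloud Storage into a dataset.
from google.cloud import automl_v1

input_config = automl_v1.InputConfig(
    gcs_source=automl_v1.GcsSource(
        input_uris=["gs://placeholder-bucket/image_classification.csv"],
    ),
)

client = automl_v1.AutoMlClient()
operation = client.import_data(
    name="projects/placeholder-project/locations/us-central1/datasets/ICN0000000000000000000",
    input_config=input_config,
)
operation.result()  # ImportData is a long-running operation; wait for completion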
-    Object Detection
- See [Preparing your training - data](https://cloud.google.com/vision/automl/object-detection/docs/prepare) - for more information. - - A CSV file(s) with each line in format: - - :: - - ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,) - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They - are Automatically divided into train and test data. 80% for - training and 20% for testing. - - - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image - of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. - Each image is assumed to be exhaustively labeled. - - - ``LABEL`` - A label that identifies the object in the image - specified by the ``BOUNDING_BOX``. - - - ``BOUNDING BOX`` - The vertices of an object in the example - image. The minimum allowed ``BOUNDING_BOX`` edge length is 0.01, - and no more than 500 ``BOUNDING_BOX`` instances per image are - allowed (one ``BOUNDING_BOX`` per line). If an image has no - looked for objects then it should be mentioned just once with no - LABEL and the ",,,,,,," in place of the ``BOUNDING_BOX``. - - **Four sample rows:** - - :: - - TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, - TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, - UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 - TEST,gs://folder/im3.png,,,,,,,,, - - .. raw:: html - -
-
- - .. raw:: html - -

AutoML Video Intelligence

- - .. raw:: html - -
Classification
- - See `Preparing your training - data `__ - for more information. - - CSV file(s) with each line in format: - - :: - - ML_USE,GCS_FILE_PATH - - For ``ML_USE``, do not use ``VALIDATE``. - - ``GCS_FILE_PATH`` is the path to another .csv file that describes - training example for a given ``ML_USE``, using the following row - format: - - :: - - GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) - - Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up - to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - - ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the - length of the video, and the end time must be after the start time. - Any segment of a video which has one or more labels on it, is - considered a hard negative for all other labels. Any segment with no - labels on it is considered to be unknown. If a whole video is - unknown, then it should be mentioned just once with ",," in place of - ``LABEL, TIME_SEGMENT_START,TIME_SEGMENT_END``. - - Sample top level CSV file: - - :: - - TRAIN,gs://folder/train_videos.csv - TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv - - Sample rows of a CSV file for a particular ML_USE: - - :: - - gs://folder/video1.avi,car,120,180.000021 - gs://folder/video1.avi,bike,150,180.000021 - gs://folder/vid2.avi,car,0,60.5 - gs://folder/vid3.avi,,, - - .. raw:: html - -
Object Tracking
- - See `Preparing your training - data `__ - for more information. - - CSV file(s) with each line in format: - - :: - - ML_USE,GCS_FILE_PATH - - For ``ML_USE``, do not use ``VALIDATE``. - - ``GCS_FILE_PATH`` is the path to another .csv file that describes - training example for a given ``ML_USE``, using the following row - format: - - :: - - GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX - - or - - :: - - GCS_FILE_PATH,,,,,,,,,, - - Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up - to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - Providing ``INSTANCE_ID``\ s can help to obtain a better model. When - a specific labeled entity leaves the video frame, and shows up - afterwards it is not required, albeit preferable, that the same - ``INSTANCE_ID`` is given to it. - - ``TIMESTAMP`` must be within the length of the video, the - ``BOUNDING_BOX`` is assumed to be drawn on the closest video's frame - to the ``TIMESTAMP``. Any mentioned by the ``TIMESTAMP`` frame is - expected to be exhaustively labeled and no more than 500 - ``BOUNDING_BOX``-es per frame are allowed. If a whole video is - unknown, then it should be mentioned just once with ",,,,,,,,,," in - place of ``LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX``. - - Sample top level CSV file: - - :: - - TRAIN,gs://folder/train_videos.csv - TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv - - Seven sample rows of a CSV file for a particular ML_USE: - - :: - - gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 - gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 - gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 - gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, - gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, - gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, - gs://folder/video2.avi,,,,,,,,,,, - - .. raw:: html - -
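The video formats above are two-level: a top-level ML_USE CSV that points at per-split CSVs. A minimal sketch of producing both levels with the standard library (bucket paths, labels, and timestamps are placeholders):

::

    import csv

    # Top-level file: ML_USE,GCS_FILE_PATH pointing at the per-split CSVs.
    with open("video_splits.csv", "w", newline="") as f:
        csv.writer(f).writerows([
            ["TRAIN", "gs://my-bucket/train_videos.csv"],
            ["TEST", "gs://my-bucket/test_videos.csv"],
        ])

    # Per-split file: GCS_FILE_PATH,LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END.
    with open("train_videos.csv", "w", newline="") as f:
        csv.writer(f).writerows([
            ["gs://my-bucket/video1.avi", "car", "120", "180.0"],
            ["gs://my-bucket/video2.avi", "", "", ""],  # whole video unlabeled
        ])

Both files would then be uploaded to the bucket so the top-level paths resolve.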
-
- - .. raw:: html - -

AutoML Natural Language

- - .. raw:: html - -
Entity Extraction
- - See `Preparing your training - data `__ for - more information. - - One or more CSV file(s) with each line in the following format: - - :: - - ML_USE,GCS_FILE_PATH - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They - are Automatically divided into train and test data. 80% for - training and 20% for testing.. - - - ``GCS_FILE_PATH`` - a Identifies JSON Lines (.JSONL) file stored - in Google Cloud Storage that contains in-line text in-line as - documents for model training. - - After the training data set has been determined from the ``TRAIN`` - and ``UNASSIGNED`` CSV files, the training data is divided into - train and validation data sets. 70% for training and 30% for - validation. - - For example: - - :: - - TRAIN,gs://folder/file1.jsonl - VALIDATE,gs://folder/file2.jsonl - TEST,gs://folder/file3.jsonl - - **In-line JSONL files** - - In-line .JSONL files contain, per line, a JSON document that wraps a - [``text_snippet``][google.cloud.automl.v1.TextSnippet] field - followed by one or more - [``annotations``][google.cloud.automl.v1.AnnotationPayload] fields, - which have ``display_name`` and ``text_extraction`` fields to - describe the entity from the text snippet. Multiple JSON documents - can be separated using line breaks (\n). - - The supplied text must be annotated exhaustively. For example, if - you include the text "horse", but do not label it as "animal", then - "horse" is assumed to not be an "animal". - - Any given text snippet content must have 30,000 characters or less, - and also be UTF-8 NFC encoded. ASCII is accepted as it is UTF-8 NFC - encoded. - - For example: - - :: - - { - "text_snippet": { - "content": "dog car cat" - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 0, "end_offset": 2} - } - }, - { - "display_name": "vehicle", - "text_extraction": { - "text_segment": {"start_offset": 4, "end_offset": 6} - } - }, - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 8, "end_offset": 10} - } - } - ] - }\n - { - "text_snippet": { - "content": "This dog is good." - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 5, "end_offset": 7} - } - } - ] - } - - **JSONL files that reference documents** - - .JSONL files contain, per line, a JSON document that wraps a - ``input_config`` that contains the path to a source document. - Multiple JSON documents can be separated using line breaks (\n). - - Supported document extensions: .PDF, .TIF, .TIFF - - For example: - - :: - - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] - } - } - } - }\n - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] - } - } - } - } - - **In-line JSONL files with document layout information** - - **Note:** You can only annotate documents using the UI. The format - described below applies to annotated documents exported using the UI - or ``exportData``. - - In-line .JSONL files for documents contain, per line, a JSON - document that wraps a ``document`` field that provides the textual - content of the document and the layout information. 
- - For example: - - :: - - { - "document": { - "document_text": { - "content": "dog car cat" - } - "layout": [ - { - "text_segment": { - "start_offset": 0, - "end_offset": 11, - }, - "page_number": 1, - "bounding_poly": { - "normalized_vertices": [ - {"x": 0.1, "y": 0.1}, - {"x": 0.1, "y": 0.3}, - {"x": 0.3, "y": 0.3}, - {"x": 0.3, "y": 0.1}, - ], - }, - "text_segment_type": TOKEN, - } - ], - "document_dimensions": { - "width": 8.27, - "height": 11.69, - "unit": INCH, - } - "page_count": 3, - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 0, "end_offset": 3} - } - }, - { - "display_name": "vehicle", - "text_extraction": { - "text_segment": {"start_offset": 4, "end_offset": 7} - } - }, - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 8, "end_offset": 11} - } - }, - ], - - .. raw:: html - -
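The in-line JSONL shape above is one JSON document per line; a minimal sketch of emitting it with the standard ``json`` module (text content and offsets are placeholders):

::

    import json

    record = {
        "text_snippet": {"content": "dog car cat"},
        "annotations": [
            {"display_name": "animal",
             "text_extraction": {"text_segment": {"start_offset": 0, "end_offset": 2}}},
            {"display_name": "vehicle",
             "text_extraction": {"text_segment": {"start_offset": 4, "end_offset": 6}}},
        ],
    }

    with open("entities.jsonl", "w", encoding="utf-8") as f:
        f.write(json.dumps(record) + "\n")  # one JSON document per line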
Classification
- - See `Preparing your training - data `__ - for more information. - - One or more CSV file(s) with each line in the following format: - - :: - - ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They - are Automatically divided into train and test data. 80% for - training and 20% for testing. - - - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a - pattern. If the column content is a valid Google Cloud Storage - file path, that is, prefixed by "gs://", it is treated as a - ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in - double quotes (""), it is treated as a ``TEXT_SNIPPET``. For - ``GCS_FILE_PATH``, the path must lead to a file with supported - extension and UTF-8 encoding, for example, - "gs://folder/content.txt" AutoML imports the file content as a - text snippet. For ``TEXT_SNIPPET``, AutoML imports the column - content excluding quotes. In both cases, size of the content must - be 10MB or less in size. For zip files, the size of each file - inside the zip must be 10MB or less in size. - - For the ``MULTICLASS`` classification type, at most one ``LABEL`` - is allowed. - - The ``ML_USE`` and ``LABEL`` columns are optional. Supported file - extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP - - A maximum of 100 unique labels are allowed per CSV row. - - Sample rows: - - :: - - TRAIN,"They have bad food and very rude",RudeService,BadFood - gs://folder/content.txt,SlowService - TEST,gs://folder/document.pdf - VALIDATE,gs://folder/text_files.zip,BadFood - - .. raw:: html - -
Sentiment Analysis
- - See `Preparing your training - data `__ - for more information. - - CSV file(s) with each line in format: - - :: - - ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They - are Automatically divided into train and test data. 80% for - training and 20% for testing. - - - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a - pattern. If the column content is a valid Google Cloud Storage - file path, that is, prefixed by "gs://", it is treated as a - ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in - double quotes (""), it is treated as a ``TEXT_SNIPPET``. For - ``GCS_FILE_PATH``, the path must lead to a file with supported - extension and UTF-8 encoding, for example, - "gs://folder/content.txt" AutoML imports the file content as a - text snippet. For ``TEXT_SNIPPET``, AutoML imports the column - content excluding quotes. In both cases, size of the content must - be 128kB or less in size. For zip files, the size of each file - inside the zip must be 128kB or less in size. - - The ``ML_USE`` and ``SENTIMENT`` columns are optional. Supported - file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP - - - ``SENTIMENT`` - An integer between 0 and - Dataset.text_sentiment_dataset_metadata.sentiment_max - (inclusive). Describes the ordinal of the sentiment - higher - value means a more positive sentiment. All the values are - completely relative, i.e. neither 0 needs to mean a negative or - neutral sentiment nor sentiment_max needs to mean a positive one - - it is just required that 0 is the least positive sentiment in - the data, and sentiment_max is the most positive one. The - SENTIMENT shouldn't be confused with "score" or "magnitude" from - the previous Natural Language Sentiment Analysis API. All - SENTIMENT values between 0 and sentiment_max must be represented - in the imported data. On prediction the same 0 to sentiment_max - range will be used. The difference between neighboring sentiment - values needs not to be uniform, e.g. 1 and 2 may be similar - whereas the difference between 2 and 3 may be large. - - Sample rows: - - :: - - TRAIN,"@freewrytin this is way too good for your product",2 - gs://folder/content.txt,3 - TEST,gs://folder/document.pdf - VALIDATE,gs://folder/text_files.zip,2 - - .. raw:: html - -
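Because every sentiment value from 0 through sentiment_max must be represented in the imported data, it can be worth checking the rows before writing the CSV; a minimal sketch with placeholder file paths and sentiment_max = 2:

::

    import csv

    SENTIMENT_MAX = 2  # must match the dataset's sentiment_max
    rows = [
        ["TRAIN", "gs://my-bucket/review1.txt", 2],
        ["TRAIN", "gs://my-bucket/review2.txt", 0],
        ["TEST", "gs://my-bucket/review3.txt", 1],
        ["UNASSIGNED", "gs://my-bucket/review4.txt", 2],
    ]

    # Every value in 0..sentiment_max has to appear somewhere in the data.
    missing = set(range(SENTIMENT_MAX + 1)) - {row[2] for row in rows}
    if missing:
        raise ValueError(f"sentiment values never represented: {sorted(missing)}")

    with open("sentiment_import.csv", "w", newline="") as f:
        csv.writer(f).writerows(rows)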
-
- - .. raw:: html - -

AutoML Tables

- - See `Preparing your training - data `__ for - more information. - - You can use either - [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or - [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source]. - All input is concatenated into a single - [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id] - - **For gcs_source:** - - CSV file(s), where the first row of the first file is the header, - containing unique column names. If the first row of a subsequent - file is the same as the header, then it is also treated as a header. - All other rows contain values for the corresponding columns. - - Each .CSV file by itself must be 10GB or smaller, and their total - size must be 100GB or smaller. - - First three sample rows of a CSV file: - - .. raw:: html - -
-        "Id","First Name","Last Name","Dob","Addresses"
-        "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
-        "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
-        
- - **For bigquery_source:** - - A URI of a BigQuery table. The user data size of the BigQuery table - must be 100GB or smaller. - - An imported table must have between 2 and 1,000 columns, inclusive, - and between 1,000 and 100,000,000 rows, inclusive. At most - 5 data imports can run in parallel. - - .. raw:: html - -
-
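Whichever of the formats above is used, the import itself goes through AutoMl.ImportData with the InputConfig and GcsSource messages defined later in this file; a minimal sketch with placeholder project, location, dataset ID, and bucket path:

::

    from google.cloud import automl_v1

    client = automl_v1.AutoMlClient()

    dataset_name = client.dataset_path("my-project", "us-central1", "my-dataset-id")
    input_config = automl_v1.InputConfig(
        gcs_source=automl_v1.GcsSource(input_uris=["gs://my-bucket/vision_import.csv"])
    )

    # ImportData is a long-running operation; result() blocks until it finishes.
    operation = client.import_data(name=dataset_name, input_config=input_config)
    operation.result()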
- - **Input field definitions:** - - ``ML_USE`` : ("TRAIN" \| "VALIDATE" \| "TEST" \| "UNASSIGNED") - Describes how the given example (file) should be used for model - training. "UNASSIGNED" can be used when user has no preference. - - ``GCS_FILE_PATH`` : The path to a file on Google Cloud Storage. For - example, "gs://folder/image1.png". - - ``LABEL`` : A display name of an object on an image, video etc., - e.g. "dog". Must be up to 32 characters long and can consist only of - ASCII Latin letters A-Z and a-z, underscores(_), and ASCII digits - 0-9. For each label an AnnotationSpec is created which display_name - becomes the label; AnnotationSpecs are given back in predictions. - - ``INSTANCE_ID`` : A positive integer that identifies a specific - instance of a labeled entity on an example. Used e.g. to track two - cars on a video while being able to tell apart which one is which. - - ``BOUNDING_BOX`` : (``VERTEX,VERTEX,VERTEX,VERTEX`` \| - ``VERTEX,,,VERTEX,,``) A rectangle parallel to the frame of the - example (image, video). If 4 vertices are given they are connected - by edges in the order provided, if 2 are given they are recognized - as diagonally opposite vertices of the rectangle. - - ``VERTEX`` : (``COORDINATE,COORDINATE``) First coordinate is - horizontal (x), the second is vertical (y). - - ``COORDINATE`` : A float in 0 to 1 range, relative to total length - of image or video in given dimension. For fractions the leading - non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is in top - left. - - ``TIME_SEGMENT_START`` : (``TIME_OFFSET``) Expresses a beginning, - inclusive, of a time segment within an example that has a time - dimension (e.g. video). - - ``TIME_SEGMENT_END`` : (``TIME_OFFSET``) Expresses an end, - exclusive, of a time segment within n example that has a time - dimension (e.g. video). - - ``TIME_OFFSET`` : A number of seconds as measured from the start of - an example (e.g. video). Fractions are allowed, up to a microsecond - precision. "inf" is allowed, and it means the end of the example. - - ``TEXT_SNIPPET`` : The content of a text snippet, UTF-8 encoded, - enclosed within double quotes (""). - - ``DOCUMENT`` : A field that provides the textual content with - document and the layout information. - - **Errors:** - - If any of the provided CSV files can't be parsed or if more than - certain percent of CSV rows cannot be processed then the operation - fails and nothing is imported. Regardless of overall success or - failure the per-row failures, up to a certain count cap, is listed - in Operation.metadata.partial_failures. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_source (google.cloud.automl_v1.types.GcsSource): - The Google Cloud Storage location for the input content. For - [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], - ``gcs_source`` points to a CSV file with a structure - described in - [InputConfig][google.cloud.automl.v1.InputConfig]. - - This field is a member of `oneof`_ ``source``. - params (MutableMapping[str, str]): - Additional domain-specific parameters describing the - semantic of the imported data, any string must be up to - 25000 characters long. - - .. raw:: html - -

AutoML Tables

- - ``schema_inference_version`` : (integer) This value must be - supplied. The version of the algorithm to use for the - initial inference of the column data types of the imported - table. Allowed values: "1". - """ - - gcs_source: 'GcsSource' = proto.Field( - proto.MESSAGE, - number=1, - oneof='source', - message='GcsSource', - ) - params: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - - -class BatchPredictInputConfig(proto.Message): - r"""Input configuration for BatchPredict Action. - - The format of input depends on the ML problem of the model used for - prediction. As input source the - [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is - expected, unless specified otherwise. - - The formats are represented in EBNF with commas being literal and - with non-terminal symbols defined near the end of this comment. The - formats are: - - .. raw:: html - -

AutoML Vision

-
Classification
- - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH - - The Google Cloud Storage location of an image of up to 30MB in size. - Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the - ID in the batch predict output. - - Sample rows: - - :: - - gs://folder/image1.jpeg - gs://folder/image2.gif - gs://folder/image3.png - - .. raw:: html - -
Object Detection
- - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH - - The Google Cloud Storage location of an image of up to 30MB in size. - Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the - ID in the batch predict output. - - Sample rows: - - :: - - gs://folder/image1.jpeg - gs://folder/image2.gif - gs://folder/image3.png - - .. raw:: html - -
-
- - .. raw:: html - -

AutoML Video Intelligence

-
Classification
- - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END - - ``GCS_FILE_PATH`` is the Google Cloud Storage location of a video up - to 50GB in size and up to 3h in duration. Supported - extensions: .MOV, .MPEG4, .MP4, .AVI. - - ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the - length of the video, and the end time must be after the start time. - - Sample rows: - - :: - - gs://folder/video1.mp4,10,40 - gs://folder/video1.mp4,20,60 - gs://folder/vid2.mov,0,inf - - .. raw:: html - -
Object Tracking
- - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END - - ``GCS_FILE_PATH`` is the Google Cloud Storage location of a video up - to 50GB in size and up to 3h in duration. Supported - extensions: .MOV, .MPEG4, .MP4, .AVI. - - ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the - length of the video, and the end time must be after the start time. - - Sample rows: - - :: - - gs://folder/video1.mp4,10,40 - gs://folder/video1.mp4,20,60 - gs://folder/vid2.mov,0,inf - - .. raw:: html - -
-
- - .. raw:: html - -

AutoML Natural Language

-
Classification
- - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH - - ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text - file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF - - Text files can be no larger than 10MB in size. - - Sample rows: - - :: - - gs://folder/text1.txt - gs://folder/text2.pdf - gs://folder/text3.tif - - .. raw:: html - -
Sentiment Analysis
- One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH - - ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text - file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF - - Text files can be no larger than 128kB in size. - - Sample rows: - - :: - - gs://folder/text1.txt - gs://folder/text2.pdf - gs://folder/text3.tif - - .. raw:: html - -
Entity Extraction
- - One or more JSONL (JSON Lines) files that either provide inline text - or documents. You can only use one format, either inline text or - documents, for a single call to [AutoMl.BatchPredict]. - - Each JSONL file contains a per line a proto that wraps a temporary - user-assigned TextSnippet ID (string up to 2000 characters long) - called "id", a TextSnippet proto (in JSON representation) and zero - or more TextFeature protos. Any given text snippet content must have - 30,000 characters or less, and also be UTF-8 NFC encoded (ASCII - already is). The IDs provided should be unique. - - Each document JSONL file contains, per line, a proto that wraps a - Document proto with ``input_config`` set. Each document cannot - exceed 2MB in size. - - Supported document extensions: .PDF, .TIF, .TIFF - - Each JSONL file must not exceed 100MB in size, and no more than 20 - JSONL files may be passed. - - Sample inline JSONL file (Shown with artificial line breaks. Actual - line breaks are denoted by "\n".): - - :: - - { - "id": "my_first_id", - "text_snippet": { "content": "dog car cat"}, - "text_features": [ - { - "text_segment": {"start_offset": 4, "end_offset": 6}, - "structural_type": PARAGRAPH, - "bounding_poly": { - "normalized_vertices": [ - {"x": 0.1, "y": 0.1}, - {"x": 0.1, "y": 0.3}, - {"x": 0.3, "y": 0.3}, - {"x": 0.3, "y": 0.1}, - ] - }, - } - ], - }\n - { - "id": "2", - "text_snippet": { - "content": "Extended sample content", - "mime_type": "text/plain" - } - } - - Sample document JSONL file (Shown with artificial line breaks. - Actual line breaks are denoted by "\n".): - - :: - - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] - } - } - } - }\n - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] - } - } - } - } - - .. raw:: html - -
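A minimal sketch of producing the inline-text JSONL described above (IDs and snippet content are placeholders; each line is a single JSON document):

::

    import json

    snippets = {"my_first_id": "dog car cat", "2": "Extended sample content"}

    with open("batch_inline.jsonl", "w", encoding="utf-8") as f:
        for snippet_id, content in snippets.items():
            f.write(json.dumps({
                "id": snippet_id,
                "text_snippet": {"content": content, "mime_type": "text/plain"},
            }) + "\n")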
-
- - .. raw:: html - -

AutoML Tables

- - See `Preparing your training - data `__ - for more information. - - You can use either - [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source] - or [bigquery_source][BatchPredictInputConfig.bigquery_source]. - - **For gcs_source:** - - CSV file(s), each by itself 10GB or smaller and total size must be - 100GB or smaller, where first file must have a header containing - column names. If the first row of a subsequent file is the same as - the header, then it is also treated as a header. All other rows - contain values for the corresponding columns. - - The column names must contain the model's - [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] - [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] - (order doesn't matter). The columns corresponding to the model's - input feature column specs must contain values compatible with the - column spec's data types. Prediction on all the rows, i.e. the CSV - lines, will be attempted. - - Sample rows from a CSV file: - - .. raw:: html - -
-        "First Name","Last Name","Dob","Addresses"
-        "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
-        "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
-        
- - **For bigquery_source:** - - The URI of a BigQuery table. The user data size of the BigQuery - table must be 100GB or smaller. - - The column names must contain the model's - [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] - [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] - (order doesn't matter). The columns corresponding to the model's - input feature column specs must contain values compatible with the - column spec's data types. Prediction on all the rows of the table - will be attempted. - - .. raw:: html - -
-
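Once an input of the appropriate shape is in Cloud Storage, the request is issued through PredictionService.BatchPredict; a minimal sketch with placeholder model name and bucket paths:

::

    from google.cloud import automl_v1

    prediction_client = automl_v1.PredictionServiceClient()

    model_name = prediction_client.model_path("my-project", "us-central1", "my-model-id")
    input_config = automl_v1.BatchPredictInputConfig(
        gcs_source=automl_v1.GcsSource(input_uris=["gs://my-bucket/batch_inputs.csv"])
    )
    output_config = automl_v1.BatchPredictOutputConfig(
        gcs_destination=automl_v1.GcsDestination(output_uri_prefix="gs://my-bucket/results/")
    )

    # BatchPredict is a long-running operation; results are written under gcs_destination.
    operation = prediction_client.batch_predict(
        name=model_name,
        input_config=input_config,
        output_config=output_config,
        params={},
    )
    operation.result()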
- - **Input field definitions:** - - ``GCS_FILE_PATH`` : The path to a file on Google Cloud Storage. For - example, "gs://folder/video.avi". - - ``TIME_SEGMENT_START`` : (``TIME_OFFSET``) Expresses a beginning, - inclusive, of a time segment within an example that has a time - dimension (e.g. video). - - ``TIME_SEGMENT_END`` : (``TIME_OFFSET``) Expresses an end, - exclusive, of a time segment within n example that has a time - dimension (e.g. video). - - ``TIME_OFFSET`` : A number of seconds as measured from the start of - an example (e.g. video). Fractions are allowed, up to a microsecond - precision. "inf" is allowed, and it means the end of the example. - - **Errors:** - - If any of the provided CSV files can't be parsed or if more than - certain percent of CSV rows cannot be processed then the operation - fails and prediction does not happen. Regardless of overall success - or failure the per-row failures, up to a certain count cap, will be - listed in Operation.metadata.partial_failures. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_source (google.cloud.automl_v1.types.GcsSource): - Required. The Google Cloud Storage location - for the input content. - - This field is a member of `oneof`_ ``source``. - """ - - gcs_source: 'GcsSource' = proto.Field( - proto.MESSAGE, - number=1, - oneof='source', - message='GcsSource', - ) - - -class DocumentInputConfig(proto.Message): - r"""Input configuration of a - [Document][google.cloud.automl.v1.Document]. - - Attributes: - gcs_source (google.cloud.automl_v1.types.GcsSource): - The Google Cloud Storage location of the - document file. Only a single path should be - given. - - Max supported size: 512MB. - - Supported extensions: .PDF. - """ - - gcs_source: 'GcsSource' = proto.Field( - proto.MESSAGE, - number=1, - message='GcsSource', - ) - - -class OutputConfig(proto.Message): - r"""- For Translation: CSV file ``translation.csv``, with each line in - format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file - which describes examples that have given ML_USE, using the - following row format per line: TEXT_SNIPPET (in source language) - \\t TEXT_SNIPPET (in target language) - - - For Tables: Output depends on whether the dataset was imported - from Google Cloud Storage or BigQuery. Google Cloud Storage - case: - [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination] - must be set. Exported are CSV file(s) ``tables_1.csv``, - ``tables_2.csv``,...,\ ``tables_N.csv`` with each having as - header line the table's column names, and all other lines - contain values for the header columns. BigQuery case: - [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given - project a new dataset will be created with name - ``export_data__`` - where will be made BigQuery-dataset-name compatible (e.g. most - special characters will become underscores), and timestamp - will be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" - format. In that dataset a new table called ``primary_table`` - will be created, and filled with precisely the same data as - this obtained on import. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_destination (google.cloud.automl_v1.types.GcsDestination): - Required. The Google Cloud Storage location where the output - is to be written to. 
For Image Object Detection, Text - Extraction, Video Classification and Tables, in the given - directory a new directory will be created with name: - export_data-- where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ - ISO-8601 format. All export output will be written into that - directory. - - This field is a member of `oneof`_ ``destination``. - """ - - gcs_destination: 'GcsDestination' = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message='GcsDestination', - ) - - -class BatchPredictOutputConfig(proto.Message): - r"""Output configuration for BatchPredict Action. - - As destination the - [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination] - must be set unless specified otherwise for a domain. If - gcs_destination is set then in the given directory a new directory - is created. Its name will be "prediction--", where timestamp is in - YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends - on the ML problem the predictions are made for. - - - For Image Classification: In the created directory files - ``image_classification_1.jsonl``, - ``image_classification_2.jsonl``,...,\ ``image_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total - number of the successfully predicted images and annotations. A - single image will be listed only once with all its annotations, - and its annotations will never be split across files. Each .JSONL - file will contain, per line, a JSON representation of a proto - that wraps image's "ID" : "" followed by a list of zero - or more AnnotationPayload protos (called annotations), which have - classification detail populated. If prediction for any image - failed (partially or completely), then an additional - ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` - files will be created (N depends on total number of failed - predictions). These files will have a JSON representation of a - proto that wraps the same "ID" : "" but here followed - by exactly one - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``\ fields. - - - For Image Object Detection: In the created directory files - ``image_object_detection_1.jsonl``, - ``image_object_detection_2.jsonl``,...,\ ``image_object_detection_N.jsonl`` - will be created, where N may be 1, and depends on the total - number of the successfully predicted images and annotations. Each - .JSONL file will contain, per line, a JSON representation of a - proto that wraps image's "ID" : "" followed by a list - of zero or more AnnotationPayload protos (called annotations), - which have image_object_detection detail populated. A single - image will be listed only once with all its annotations, and its - annotations will never be split across files. If prediction for - any image failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` - files will be created (N depends on total number of failed - predictions). These files will have a JSON representation of a - proto that wraps the same "ID" : "" but here followed - by exactly one - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``\ fields. - - - For Video Classification: In the created directory a - video_classification.csv file, and a .JSON file per each video - classification requested in the input (i.e. each line in given - CSV(s)), will be created. 
- - :: - - The format of video_classification.csv is: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS - where: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 - the prediction input lines (i.e. video_classification.csv has - precisely the same number of lines as the prediction input had.) - JSON_FILE_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. - STATUS = "OK" if prediction completed successfully, or an error code - with message otherwise. If STATUS is not "OK" then the .JSON file - for that line may not exist or be empty. - - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for the video time segment the file is assigned to in the - video_classification.csv. All AnnotationPayload protos will have - video_classification field set, and will be sorted by - video_classification.type field (note that the returned types are - governed by `classifaction_types` parameter in - [PredictService.BatchPredictRequest.params][]). - - - For Video Object Tracking: In the created directory a - video_object_tracking.csv file will be created, and multiple - files video_object_trackinng_1.json, - video_object_trackinng_2.json,..., video_object_trackinng_N.json, - where N is the number of requests in the input (i.e. the number - of lines in given CSV(s)). - - :: - - The format of video_object_tracking.csv is: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS - where: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 - the prediction input lines (i.e. video_object_tracking.csv has - precisely the same number of lines as the prediction input had.) - JSON_FILE_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. - STATUS = "OK" if prediction completed successfully, or an error - code with message otherwise. If STATUS is not "OK" then the .JSON - file for that line may not exist or be empty. - - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for each frame of the video time segment the file is assigned to in - video_object_tracking.csv. All AnnotationPayload protos will have - video_object_tracking field set. - - - For Text Classification: In the created directory files - ``text_classification_1.jsonl``, - ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total - number of inputs and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text file (or document) in - the text snippet (or document) proto and a list of - zero or more AnnotationPayload protos (called annotations), which - have classification detail populated. A single text file (or - document) will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any input file (or document) failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). 
These files will have a JSON representation of a - proto that wraps input file followed by exactly one - [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) - containing only `code` and `message`. - - - For Text Sentiment: In the created directory files - ``text_sentiment_1.jsonl``, - ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will - be created, where N may be 1, and depends on the total number of - inputs and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text file (or document) in - the text snippet (or document) proto and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_sentiment detail populated. A single text file (or - document) will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any input file (or document) failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input file followed by exactly one - [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) - containing only `code` and `message`. - - - For Text Extraction: In the created directory files - ``text_extraction_1.jsonl``, - ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` - will be created, where N may be 1, and depends on the total - number of inputs and annotations found. The contents of these - .JSONL file(s) depend on whether the input used inline text, or - documents. If input was inline, then each .JSONL file will - contain, per line, a JSON representation of a proto that wraps - given in request text snippet's "id" (if specified), followed by - input text snippet, and a list of zero or more AnnotationPayload - protos (called annotations), which have text_extraction detail - populated. A single text snippet will be listed only once with - all its annotations, and its annotations will never be split - across files. If input used documents, then each .JSONL file will - contain, per line, a JSON representation of a proto that wraps - given in request document proto, followed by its OCR-ed - representation in the form of a text snippet, finally followed by - a list of zero or more AnnotationPayload protos (called - annotations), which have text_extraction detail populated and - refer, via their indices, to the OCR-ed text snippet. A single - document (and its text snippet) will be listed only once with all - its annotations, and its annotations will never be split across - files. If prediction for any text snippet failed (partially or - completely), then additional ``errors_1.jsonl``, - ``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created - (N depends on total number of failed predictions). These files - will have a JSON representation of a proto that wraps either the - "id" : "" (in case of inline) or the document proto (in - case of document) but here followed by exactly one - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. 
- - - For Tables: Output depends on whether - [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination] - or - [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination] - is set (either is allowed). Google Cloud Storage case: In the - created directory files ``tables_1.csv``, ``tables_2.csv``,..., - ``tables_N.csv`` will be created, where N may be 1, and depends - on the total number of the successfully predicted rows. For all - CLASSIFICATION - [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: - Each .csv file will contain a header, listing all columns' - [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] - given on input followed by M target column names in the format of - "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] - [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>*\ score" - where M is the number of distinct target values, i.e. number of - distinct values in the target column of the table used to train - the model. Subsequent lines will contain the respective values of - successfully predicted rows, with the last, i.e. the target, - columns having the corresponding prediction - [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. - For REGRESSION and FORECASTING - [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: - Each .csv file will contain a header, listing all columns' - [display_name-s][google.cloud.automl.v1p1beta.display_name] given - on input followed by the predicted target column with name in the - format of - "predicted\ <[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] - [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" - Subsequent lines will contain the respective values of - successfully predicted rows, with the last, i.e. the target, - column having the predicted target value. If prediction for any - rows failed, then an additional ``errors_1.csv``, - ``errors_2.csv``,..., ``errors_N.csv`` will be created (N depends - on total number of failed rows). These files will have analogous - format as ``tables_*.csv``, but always with a single target - column - having*\ ```google.rpc.Status`` `__\ *represented - as a JSON string, and containing only ``code`` and ``message``. - BigQuery case: - [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given project - a new dataset will be created with name - ``prediction__`` - where will be made BigQuery-dataset-name compatible (e.g. most - special characters will become underscores), and timestamp will - be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the - dataset two tables will be created, ``predictions``, and - ``errors``. 
The ``predictions`` table's column names will be the - input columns' - [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] - followed by the target column with name in the format of - "predicted*\ <[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] - [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" - The input feature columns will contain the respective values of - successfully predicted rows, with the target column having an - ARRAY of - [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload], - represented as STRUCT-s, containing - [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation]. - The ``errors`` table contains rows for which the prediction has - failed, it has analogous input columns while the target column - name is in the format of - "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] - [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>", - and as a value has - ```google.rpc.Status`` `__ - represented as a STRUCT, and containing only ``code`` and - ``message``. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_destination (google.cloud.automl_v1.types.GcsDestination): - Required. The Google Cloud Storage location - of the directory where the output is to be - written to. - - This field is a member of `oneof`_ ``destination``. - """ - - gcs_destination: 'GcsDestination' = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message='GcsDestination', - ) - - -class ModelExportOutputConfig(proto.Message): - r"""Output configuration for ModelExport Action. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_destination (google.cloud.automl_v1.types.GcsDestination): - Required. The Google Cloud Storage location where the model - is to be written to. This location may only be set for the - following model formats: "tflite", "edgetpu_tflite", - "tf_saved_model", "tf_js", "core_ml". - - Under the directory given as the destination a new one with - name "model-export--", where timestamp is in - YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. - Inside the model and any of its supporting files will be - written. - - This field is a member of `oneof`_ ``destination``. - model_format (str): - The format in which the model must be exported. The - available, and default, formats depend on the problem and - model type (if given problem and type combination doesn't - have a format listed, it means its models are not - exportable): - - - For Image Classification mobile-low-latency-1, - mobile-versatile-1, mobile-high-accuracy-1: "tflite" - (default), "edgetpu_tflite", "tf_saved_model", "tf_js", - "docker". - - - For Image Classification mobile-core-ml-low-latency-1, - mobile-core-ml-versatile-1, - mobile-core-ml-high-accuracy-1: "core_ml" (default). - - - For Image Object Detection mobile-low-latency-1, - mobile-versatile-1, mobile-high-accuracy-1: "tflite", - "tf_saved_model", "tf_js". Formats description: - - - tflite - Used for Android mobile devices. - - - edgetpu_tflite - Used for `Edge - TPU `__ devices. - - - tf_saved_model - A tensorflow model in SavedModel format. - - - tf_js - A - `TensorFlow.js `__ model - that can be used in the browser and in Node.js using - JavaScript. - - - docker - Used for Docker containers. 
Use the params field - to customize the container. The container is verified to - work correctly on ubuntu 16.04 operating system. See more - at `containers - quickstart `__ - - - core_ml - Used for iOS mobile devices. - params (MutableMapping[str, str]): - Additional model-type and format specific parameters - describing the requirements for the to be exported model - files, any string must be up to 25000 characters long. - - - For ``docker`` format: ``cpu_architecture`` - (string) - "x86_64" (default). ``gpu_architecture`` - (string) - "none" (default), "nvidia". - """ - - gcs_destination: 'GcsDestination' = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message='GcsDestination', - ) - model_format: str = proto.Field( - proto.STRING, - number=4, - ) - params: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - - -class GcsSource(proto.Message): - r"""The Google Cloud Storage location for the input content. - - Attributes: - input_uris (MutableSequence[str]): - Required. Google Cloud Storage URIs to input files, up to - 2000 characters long. Accepted forms: - - - Full object path, e.g. gs://bucket/directory/object.csv - """ - - input_uris: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class GcsDestination(proto.Message): - r"""The Google Cloud Storage location where the output is to be - written to. - - Attributes: - output_uri_prefix (str): - Required. Google Cloud Storage URI to output directory, up - to 2000 characters long. Accepted forms: - - - Prefix path: gs://bucket/directory The requesting user - must have write permission to the bucket. The directory - is created if it doesn't exist. - """ - - output_uri_prefix: str = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/model.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/model.py deleted file mode 100644 index ed64311b..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/model.py +++ /dev/null @@ -1,201 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import image -from google.cloud.automl_v1.types import text -from google.cloud.automl_v1.types import translation -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'Model', - }, -) - - -class Model(proto.Message): - r"""API proto representing a trained machine learning model. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - translation_model_metadata (google.cloud.automl_v1.types.TranslationModelMetadata): - Metadata for translation models. - - This field is a member of `oneof`_ ``model_metadata``. - image_classification_model_metadata (google.cloud.automl_v1.types.ImageClassificationModelMetadata): - Metadata for image classification models. - - This field is a member of `oneof`_ ``model_metadata``. - text_classification_model_metadata (google.cloud.automl_v1.types.TextClassificationModelMetadata): - Metadata for text classification models. - - This field is a member of `oneof`_ ``model_metadata``. - image_object_detection_model_metadata (google.cloud.automl_v1.types.ImageObjectDetectionModelMetadata): - Metadata for image object detection models. - - This field is a member of `oneof`_ ``model_metadata``. - text_extraction_model_metadata (google.cloud.automl_v1.types.TextExtractionModelMetadata): - Metadata for text extraction models. - - This field is a member of `oneof`_ ``model_metadata``. - text_sentiment_model_metadata (google.cloud.automl_v1.types.TextSentimentModelMetadata): - Metadata for text sentiment models. - - This field is a member of `oneof`_ ``model_metadata``. - name (str): - Output only. Resource name of the model. Format: - ``projects/{project_id}/locations/{location_id}/models/{model_id}`` - display_name (str): - Required. The name of the model to show in the interface. - The name can be up to 32 characters long and can consist - only of ASCII Latin letters A-Z and a-z, underscores (_), - and ASCII digits 0-9. It must start with a letter. - dataset_id (str): - Required. The resource ID of the dataset used - to create the model. The dataset must come from - the same ancestor project and location. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the model - training finished and can be used for - prediction. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this model was - last updated. - deployment_state (google.cloud.automl_v1.types.Model.DeploymentState): - Output only. Deployment state of the model. A - model can only serve prediction requests after - it gets deployed. - etag (str): - Used to perform a consistent - read-modify-write updates. If not set, a blind - "overwrite" update happens. - labels (MutableMapping[str, str]): - Optional. The labels with user-defined - metadata to organize your model. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. Label values are optional. Label - keys must start with a letter. - - See https://goo.gl/xmQnxf for more information - on and examples of labels. - """ - class DeploymentState(proto.Enum): - r"""Deployment state of the model. - - Values: - DEPLOYMENT_STATE_UNSPECIFIED (0): - Should not be used, an un-set enum has this - value by default. - DEPLOYED (1): - Model is deployed. - UNDEPLOYED (2): - Model is not deployed. 
- """ - DEPLOYMENT_STATE_UNSPECIFIED = 0 - DEPLOYED = 1 - UNDEPLOYED = 2 - - translation_model_metadata: translation.TranslationModelMetadata = proto.Field( - proto.MESSAGE, - number=15, - oneof='model_metadata', - message=translation.TranslationModelMetadata, - ) - image_classification_model_metadata: image.ImageClassificationModelMetadata = proto.Field( - proto.MESSAGE, - number=13, - oneof='model_metadata', - message=image.ImageClassificationModelMetadata, - ) - text_classification_model_metadata: text.TextClassificationModelMetadata = proto.Field( - proto.MESSAGE, - number=14, - oneof='model_metadata', - message=text.TextClassificationModelMetadata, - ) - image_object_detection_model_metadata: image.ImageObjectDetectionModelMetadata = proto.Field( - proto.MESSAGE, - number=20, - oneof='model_metadata', - message=image.ImageObjectDetectionModelMetadata, - ) - text_extraction_model_metadata: text.TextExtractionModelMetadata = proto.Field( - proto.MESSAGE, - number=19, - oneof='model_metadata', - message=text.TextExtractionModelMetadata, - ) - text_sentiment_model_metadata: text.TextSentimentModelMetadata = proto.Field( - proto.MESSAGE, - number=22, - oneof='model_metadata', - message=text.TextSentimentModelMetadata, - ) - name: str = proto.Field( - proto.STRING, - number=1, - ) - display_name: str = proto.Field( - proto.STRING, - number=2, - ) - dataset_id: str = proto.Field( - proto.STRING, - number=3, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - update_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - deployment_state: DeploymentState = proto.Field( - proto.ENUM, - number=8, - enum=DeploymentState, - ) - etag: str = proto.Field( - proto.STRING, - number=10, - ) - labels: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=34, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/model_evaluation.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/model_evaluation.py deleted file mode 100644 index ea4aca07..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/model_evaluation.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import classification -from google.cloud.automl_v1.types import detection -from google.cloud.automl_v1.types import text_extraction -from google.cloud.automl_v1.types import text_sentiment -from google.cloud.automl_v1.types import translation -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'ModelEvaluation', - }, -) - - -class ModelEvaluation(proto.Message): - r"""Evaluation results of a model. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - classification_evaluation_metrics (google.cloud.automl_v1.types.ClassificationEvaluationMetrics): - Model evaluation metrics for image, text, - video and tables classification. - Tables problem is considered a classification - when the target column is CATEGORY DataType. - - This field is a member of `oneof`_ ``metrics``. - translation_evaluation_metrics (google.cloud.automl_v1.types.TranslationEvaluationMetrics): - Model evaluation metrics for translation. - - This field is a member of `oneof`_ ``metrics``. - image_object_detection_evaluation_metrics (google.cloud.automl_v1.types.ImageObjectDetectionEvaluationMetrics): - Model evaluation metrics for image object - detection. - - This field is a member of `oneof`_ ``metrics``. - text_sentiment_evaluation_metrics (google.cloud.automl_v1.types.TextSentimentEvaluationMetrics): - Evaluation metrics for text sentiment models. - - This field is a member of `oneof`_ ``metrics``. - text_extraction_evaluation_metrics (google.cloud.automl_v1.types.TextExtractionEvaluationMetrics): - Evaluation metrics for text extraction - models. - - This field is a member of `oneof`_ ``metrics``. - name (str): - Output only. Resource name of the model evaluation. Format: - ``projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}`` - annotation_spec_id (str): - Output only. The ID of the annotation spec that the model - evaluation applies to. The The ID is empty for the overall - model evaluation. For Tables annotation specs in the dataset - do not exist and this ID is always not set, but for - CLASSIFICATION - [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type] - the - [display_name][google.cloud.automl.v1.ModelEvaluation.display_name] - field is used. - display_name (str): - Output only. The value of - [display_name][google.cloud.automl.v1.AnnotationSpec.display_name] - at the moment when the model was trained. Because this field - returns a value at model training time, for different models - trained from the same dataset, the values may differ, since - display names could had been changed between the two model's - trainings. For Tables CLASSIFICATION - [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type] - distinct values of the target column at the moment of the - model evaluation are populated here. The display_name is - empty for the overall model evaluation. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. 
Timestamp when this model - evaluation was created. - evaluated_example_count (int): - Output only. The number of examples used for model - evaluation, i.e. for which ground truth from time of model - creation is compared against the predicted annotations - created by the model. For overall ModelEvaluation (i.e. with - annotation_spec_id not set) this is the total number of all - examples used for evaluation. Otherwise, this is the count - of examples that according to the ground truth were - annotated by the - [annotation_spec_id][google.cloud.automl.v1.ModelEvaluation.annotation_spec_id]. - """ - - classification_evaluation_metrics: classification.ClassificationEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=8, - oneof='metrics', - message=classification.ClassificationEvaluationMetrics, - ) - translation_evaluation_metrics: translation.TranslationEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=9, - oneof='metrics', - message=translation.TranslationEvaluationMetrics, - ) - image_object_detection_evaluation_metrics: detection.ImageObjectDetectionEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=12, - oneof='metrics', - message=detection.ImageObjectDetectionEvaluationMetrics, - ) - text_sentiment_evaluation_metrics: text_sentiment.TextSentimentEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=11, - oneof='metrics', - message=text_sentiment.TextSentimentEvaluationMetrics, - ) - text_extraction_evaluation_metrics: text_extraction.TextExtractionEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=13, - oneof='metrics', - message=text_extraction.TextExtractionEvaluationMetrics, - ) - name: str = proto.Field( - proto.STRING, - number=1, - ) - annotation_spec_id: str = proto.Field( - proto.STRING, - number=2, - ) - display_name: str = proto.Field( - proto.STRING, - number=15, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - evaluated_example_count: int = proto.Field( - proto.INT32, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/operations.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/operations.py deleted file mode 100644 index 84fab23a..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/operations.py +++ /dev/null @@ -1,330 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import io -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'OperationMetadata', - 'DeleteOperationMetadata', - 'DeployModelOperationMetadata', - 'UndeployModelOperationMetadata', - 'CreateDatasetOperationMetadata', - 'CreateModelOperationMetadata', - 'ImportDataOperationMetadata', - 'ExportDataOperationMetadata', - 'BatchPredictOperationMetadata', - 'ExportModelOperationMetadata', - }, -) - - -class OperationMetadata(proto.Message): - r"""Metadata used across all long running operations returned by - AutoML API. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - delete_details (google.cloud.automl_v1.types.DeleteOperationMetadata): - Details of a Delete operation. - - This field is a member of `oneof`_ ``details``. - deploy_model_details (google.cloud.automl_v1.types.DeployModelOperationMetadata): - Details of a DeployModel operation. - - This field is a member of `oneof`_ ``details``. - undeploy_model_details (google.cloud.automl_v1.types.UndeployModelOperationMetadata): - Details of an UndeployModel operation. - - This field is a member of `oneof`_ ``details``. - create_model_details (google.cloud.automl_v1.types.CreateModelOperationMetadata): - Details of CreateModel operation. - - This field is a member of `oneof`_ ``details``. - create_dataset_details (google.cloud.automl_v1.types.CreateDatasetOperationMetadata): - Details of CreateDataset operation. - - This field is a member of `oneof`_ ``details``. - import_data_details (google.cloud.automl_v1.types.ImportDataOperationMetadata): - Details of ImportData operation. - - This field is a member of `oneof`_ ``details``. - batch_predict_details (google.cloud.automl_v1.types.BatchPredictOperationMetadata): - Details of BatchPredict operation. - - This field is a member of `oneof`_ ``details``. - export_data_details (google.cloud.automl_v1.types.ExportDataOperationMetadata): - Details of ExportData operation. - - This field is a member of `oneof`_ ``details``. - export_model_details (google.cloud.automl_v1.types.ExportModelOperationMetadata): - Details of ExportModel operation. - - This field is a member of `oneof`_ ``details``. - progress_percent (int): - Output only. Progress of operation. Range: [0, 100]. Not - used currently. - partial_failures (MutableSequence[google.rpc.status_pb2.Status]): - Output only. Partial failures encountered. - E.g. single files that couldn't be read. - This field should never exceed 20 entries. - Status details field will contain standard GCP - error details. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the operation was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the operation was - updated for the last time. 
- """ - - delete_details: 'DeleteOperationMetadata' = proto.Field( - proto.MESSAGE, - number=8, - oneof='details', - message='DeleteOperationMetadata', - ) - deploy_model_details: 'DeployModelOperationMetadata' = proto.Field( - proto.MESSAGE, - number=24, - oneof='details', - message='DeployModelOperationMetadata', - ) - undeploy_model_details: 'UndeployModelOperationMetadata' = proto.Field( - proto.MESSAGE, - number=25, - oneof='details', - message='UndeployModelOperationMetadata', - ) - create_model_details: 'CreateModelOperationMetadata' = proto.Field( - proto.MESSAGE, - number=10, - oneof='details', - message='CreateModelOperationMetadata', - ) - create_dataset_details: 'CreateDatasetOperationMetadata' = proto.Field( - proto.MESSAGE, - number=30, - oneof='details', - message='CreateDatasetOperationMetadata', - ) - import_data_details: 'ImportDataOperationMetadata' = proto.Field( - proto.MESSAGE, - number=15, - oneof='details', - message='ImportDataOperationMetadata', - ) - batch_predict_details: 'BatchPredictOperationMetadata' = proto.Field( - proto.MESSAGE, - number=16, - oneof='details', - message='BatchPredictOperationMetadata', - ) - export_data_details: 'ExportDataOperationMetadata' = proto.Field( - proto.MESSAGE, - number=21, - oneof='details', - message='ExportDataOperationMetadata', - ) - export_model_details: 'ExportModelOperationMetadata' = proto.Field( - proto.MESSAGE, - number=22, - oneof='details', - message='ExportModelOperationMetadata', - ) - progress_percent: int = proto.Field( - proto.INT32, - number=13, - ) - partial_failures: MutableSequence[status_pb2.Status] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=status_pb2.Status, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteOperationMetadata(proto.Message): - r"""Details of operations that perform deletes of any entities. - """ - - -class DeployModelOperationMetadata(proto.Message): - r"""Details of DeployModel operation. - """ - - -class UndeployModelOperationMetadata(proto.Message): - r"""Details of UndeployModel operation. - """ - - -class CreateDatasetOperationMetadata(proto.Message): - r"""Details of CreateDataset operation. - """ - - -class CreateModelOperationMetadata(proto.Message): - r"""Details of CreateModel operation. - """ - - -class ImportDataOperationMetadata(proto.Message): - r"""Details of ImportData operation. - """ - - -class ExportDataOperationMetadata(proto.Message): - r"""Details of ExportData operation. - - Attributes: - output_info (google.cloud.automl_v1.types.ExportDataOperationMetadata.ExportDataOutputInfo): - Output only. Information further describing - this export data's output. - """ - - class ExportDataOutputInfo(proto.Message): - r"""Further describes this export data's output. Supplements - [OutputConfig][google.cloud.automl.v1.OutputConfig]. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_output_directory (str): - The full path of the Google Cloud Storage - directory created, into which the exported data - is written. - - This field is a member of `oneof`_ ``output_location``. 
- """ - - gcs_output_directory: str = proto.Field( - proto.STRING, - number=1, - oneof='output_location', - ) - - output_info: ExportDataOutputInfo = proto.Field( - proto.MESSAGE, - number=1, - message=ExportDataOutputInfo, - ) - - -class BatchPredictOperationMetadata(proto.Message): - r"""Details of BatchPredict operation. - - Attributes: - input_config (google.cloud.automl_v1.types.BatchPredictInputConfig): - Output only. The input config that was given - upon starting this batch predict operation. - output_info (google.cloud.automl_v1.types.BatchPredictOperationMetadata.BatchPredictOutputInfo): - Output only. Information further describing - this batch predict's output. - """ - - class BatchPredictOutputInfo(proto.Message): - r"""Further describes this batch predict's output. Supplements - [BatchPredictOutputConfig][google.cloud.automl.v1.BatchPredictOutputConfig]. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_output_directory (str): - The full path of the Google Cloud Storage - directory created, into which the prediction - output is written. - - This field is a member of `oneof`_ ``output_location``. - """ - - gcs_output_directory: str = proto.Field( - proto.STRING, - number=1, - oneof='output_location', - ) - - input_config: io.BatchPredictInputConfig = proto.Field( - proto.MESSAGE, - number=1, - message=io.BatchPredictInputConfig, - ) - output_info: BatchPredictOutputInfo = proto.Field( - proto.MESSAGE, - number=2, - message=BatchPredictOutputInfo, - ) - - -class ExportModelOperationMetadata(proto.Message): - r"""Details of ExportModel operation. - - Attributes: - output_info (google.cloud.automl_v1.types.ExportModelOperationMetadata.ExportModelOutputInfo): - Output only. Information further describing - the output of this model export. - """ - - class ExportModelOutputInfo(proto.Message): - r"""Further describes the output of model export. Supplements - [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig]. - - Attributes: - gcs_output_directory (str): - The full path of the Google Cloud Storage - directory created, into which the model will be - exported. - """ - - gcs_output_directory: str = proto.Field( - proto.STRING, - number=1, - ) - - output_info: ExportModelOutputInfo = proto.Field( - proto.MESSAGE, - number=2, - message=ExportModelOutputInfo, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/prediction_service.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/prediction_service.py deleted file mode 100644 index c8dc1db3..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/prediction_service.py +++ /dev/null @@ -1,302 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import annotation_payload -from google.cloud.automl_v1.types import data_items -from google.cloud.automl_v1.types import io - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'PredictRequest', - 'PredictResponse', - 'BatchPredictRequest', - 'BatchPredictResult', - }, -) - - -class PredictRequest(proto.Message): - r"""Request message for - [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. - - Attributes: - name (str): - Required. Name of the model requested to - serve the prediction. - payload (google.cloud.automl_v1.types.ExamplePayload): - Required. Payload to perform a prediction on. - The payload must match the problem type that the - model was trained to solve. - params (MutableMapping[str, str]): - Additional domain-specific parameters, any string must be up - to 25000 characters long. - - AutoML Vision Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. When - the model makes predictions for an image, it will only - produce results that have at least this confidence score. - The default is 0.5. - - AutoML Vision Object Detection - - ``score_threshold`` : (float) When Model detects objects on - the image, it will only produce bounding boxes which have at - least this confidence score. Value in 0 to 1 range, default - is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number of - bounding boxes returned. The default is 100. The number of - returned bounding boxes might be limited by the server. - - AutoML Tables - - ``feature_importance`` : (boolean) Whether - [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance] - is populated in the returned list of - [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation] - objects. The default is false. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - payload: data_items.ExamplePayload = proto.Field( - proto.MESSAGE, - number=2, - message=data_items.ExamplePayload, - ) - params: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=3, - ) - - -class PredictResponse(proto.Message): - r"""Response message for - [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. - - Attributes: - payload (MutableSequence[google.cloud.automl_v1.types.AnnotationPayload]): - Prediction result. - AutoML Translation and AutoML Natural Language - Sentiment Analysis return precisely one payload. - preprocessed_input (google.cloud.automl_v1.types.ExamplePayload): - The preprocessed example that AutoML actually makes - prediction on. Empty if AutoML does not preprocess the input - example. - - For AutoML Natural Language (Classification, Entity - Extraction, and Sentiment Analysis), if the input is a - document, the recognized text is returned in the - [document_text][google.cloud.automl.v1.Document.document_text] - property. - metadata (MutableMapping[str, str]): - Additional domain-specific prediction response metadata. - - AutoML Vision Object Detection - - ``max_bounding_box_count`` : (int64) The maximum number of - bounding boxes to return per image. 
- - AutoML Natural Language Sentiment Analysis - - ``sentiment_score`` : (float, deprecated) A value between -1 - and 1, -1 maps to least positive sentiment, while 1 maps to - the most positive one and the higher the score, the more - positive the sentiment in the document is. Yet these values - are relative to the training data, so e.g. if all data was - positive then -1 is also positive (though the least). - ``sentiment_score`` is not the same as "score" and - "magnitude" from Sentiment Analysis in the Natural Language - API. - """ - - payload: MutableSequence[annotation_payload.AnnotationPayload] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=annotation_payload.AnnotationPayload, - ) - preprocessed_input: data_items.ExamplePayload = proto.Field( - proto.MESSAGE, - number=3, - message=data_items.ExamplePayload, - ) - metadata: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - - -class BatchPredictRequest(proto.Message): - r"""Request message for - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. - - Attributes: - name (str): - Required. Name of the model requested to - serve the batch prediction. - input_config (google.cloud.automl_v1.types.BatchPredictInputConfig): - Required. The input configuration for batch - prediction. - output_config (google.cloud.automl_v1.types.BatchPredictOutputConfig): - Required. The Configuration specifying where - output predictions should be written. - params (MutableMapping[str, str]): - Additional domain-specific parameters for the predictions, - any string must be up to 25000 characters long. - - AutoML Natural Language Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. When - the model makes predictions for a text snippet, it will only - produce results that have at least this confidence score. - The default is 0.5. - - AutoML Vision Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. When - the model makes predictions for an image, it will only - produce results that have at least this confidence score. - The default is 0.5. - - AutoML Vision Object Detection - - ``score_threshold`` : (float) When Model detects objects on - the image, it will only produce bounding boxes which have at - least this confidence score. Value in 0 to 1 range, default - is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number of - bounding boxes returned per image. The default is 100, the - number of bounding boxes returned might be limited by the - server. AutoML Video Intelligence Classification - - ``score_threshold`` : (float) A value from 0.0 to 1.0. When - the model makes predictions for a video, it will only - produce results that have at least this confidence score. - The default is 0.5. - - ``segment_classification`` : (boolean) Set to true to - request segment-level classification. AutoML Video - Intelligence returns labels and their confidence scores for - the entire segment of the video that user specified in the - request configuration. The default is true. - - ``shot_classification`` : (boolean) Set to true to request - shot-level classification. AutoML Video Intelligence - determines the boundaries for each camera shot in the entire - segment of the video that user specified in the request - configuration. AutoML Video Intelligence then returns labels - and their confidence scores for each detected shot, along - with the start and end time of the shot. The default is - false. 
- - WARNING: Model evaluation is not done for this - classification type, the quality of it depends on training - data, but there are no metrics provided to describe that - quality. - - ``1s_interval_classification`` : (boolean) Set to true to - request classification for a video at one-second intervals. - AutoML Video Intelligence returns labels and their - confidence scores for each second of the entire segment of - the video that user specified in the request configuration. - The default is false. - - WARNING: Model evaluation is not done for this - classification type, the quality of it depends on training - data, but there are no metrics provided to describe that - quality. - - AutoML Video Intelligence Object Tracking - - ``score_threshold`` : (float) When Model detects objects on - video frames, it will only produce bounding boxes which have - at least this confidence score. Value in 0 to 1 range, - default is 0.5. - - ``max_bounding_box_count`` : (int64) The maximum number of - bounding boxes returned per image. The default is 100, the - number of bounding boxes returned might be limited by the - server. - - ``min_bounding_box_size`` : (float) Only bounding boxes with - shortest edge at least that long as a relative value of - video frame size are returned. Value in 0 to 1 range. - Default is 0. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - input_config: io.BatchPredictInputConfig = proto.Field( - proto.MESSAGE, - number=3, - message=io.BatchPredictInputConfig, - ) - output_config: io.BatchPredictOutputConfig = proto.Field( - proto.MESSAGE, - number=4, - message=io.BatchPredictOutputConfig, - ) - params: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - - -class BatchPredictResult(proto.Message): - r"""Result of the Batch Predict. This message is returned in - [response][google.longrunning.Operation.response] of the operation - returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. - - Attributes: - metadata (MutableMapping[str, str]): - Additional domain-specific prediction response metadata. - - AutoML Vision Object Detection - - ``max_bounding_box_count`` : (int64) The maximum number of - bounding boxes returned per image. - - AutoML Video Intelligence Object Tracking - - ``max_bounding_box_count`` : (int64) The maximum number of - bounding boxes returned per frame. - """ - - metadata: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/service.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/service.py deleted file mode 100644 index 6bb29c51..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/service.py +++ /dev/null @@ -1,621 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import dataset as gca_dataset -from google.cloud.automl_v1.types import image -from google.cloud.automl_v1.types import io -from google.cloud.automl_v1.types import model as gca_model -from google.cloud.automl_v1.types import model_evaluation as gca_model_evaluation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'CreateDatasetRequest', - 'GetDatasetRequest', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'UpdateDatasetRequest', - 'DeleteDatasetRequest', - 'ImportDataRequest', - 'ExportDataRequest', - 'GetAnnotationSpecRequest', - 'CreateModelRequest', - 'GetModelRequest', - 'ListModelsRequest', - 'ListModelsResponse', - 'DeleteModelRequest', - 'UpdateModelRequest', - 'DeployModelRequest', - 'UndeployModelRequest', - 'ExportModelRequest', - 'GetModelEvaluationRequest', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - }, -) - - -class CreateDatasetRequest(proto.Message): - r"""Request message for - [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. - - Attributes: - parent (str): - Required. The resource name of the project to - create the dataset for. - dataset (google.cloud.automl_v1.types.Dataset): - Required. The dataset to create. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - dataset: gca_dataset.Dataset = proto.Field( - proto.MESSAGE, - number=2, - message=gca_dataset.Dataset, - ) - - -class GetDatasetRequest(proto.Message): - r"""Request message for - [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. - - Attributes: - name (str): - Required. The resource name of the dataset to - retrieve. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListDatasetsRequest(proto.Message): - r"""Request message for - [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - - Attributes: - parent (str): - Required. The resource name of the project - from which to list datasets. - filter (str): - An expression for filtering the results of the request. - - - ``dataset_metadata`` - for existence of the case (e.g. - ``image_classification_dataset_metadata:*``). Some - examples of using the filter are: - - - ``translation_dataset_metadata:*`` --> The dataset has - ``translation_dataset_metadata``. - page_size (int): - Requested page size. Server may return fewer - results than requested. If unspecified, server - will pick a default size. - page_token (str): - A token identifying a page of results for the server to - return Typically obtained via - [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token] - of the previous - [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets] - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - filter: str = proto.Field( - proto.STRING, - number=3, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDatasetsResponse(proto.Message): - r"""Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - - Attributes: - datasets (MutableSequence[google.cloud.automl_v1.types.Dataset]): - The datasets read. - next_page_token (str): - A token to retrieve next page of results. 
Pass to - [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - datasets: MutableSequence[gca_dataset.Dataset] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateDatasetRequest(proto.Message): - r"""Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] - - Attributes: - dataset (google.cloud.automl_v1.types.Dataset): - Required. The dataset which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the - resource. - """ - - dataset: gca_dataset.Dataset = proto.Field( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteDatasetRequest(proto.Message): - r"""Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. - - Attributes: - name (str): - Required. The resource name of the dataset to - delete. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ImportDataRequest(proto.Message): - r"""Request message for - [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. - - Attributes: - name (str): - Required. Dataset name. Dataset must already - exist. All imported annotations and examples - will be added. - input_config (google.cloud.automl_v1.types.InputConfig): - Required. The desired input location and its - domain specific semantics, if any. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - input_config: io.InputConfig = proto.Field( - proto.MESSAGE, - number=3, - message=io.InputConfig, - ) - - -class ExportDataRequest(proto.Message): - r"""Request message for - [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. - - Attributes: - name (str): - Required. The resource name of the dataset. - output_config (google.cloud.automl_v1.types.OutputConfig): - Required. The desired output location. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - output_config: io.OutputConfig = proto.Field( - proto.MESSAGE, - number=3, - message=io.OutputConfig, - ) - - -class GetAnnotationSpecRequest(proto.Message): - r"""Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. - - Attributes: - name (str): - Required. The resource name of the annotation - spec to retrieve. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateModelRequest(proto.Message): - r"""Request message for - [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. - - Attributes: - parent (str): - Required. Resource name of the parent project - where the model is being created. - model (google.cloud.automl_v1.types.Model): - Required. The model to create. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - model: gca_model.Model = proto.Field( - proto.MESSAGE, - number=4, - message=gca_model.Model, - ) - - -class GetModelRequest(proto.Message): - r"""Request message for - [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. - - Attributes: - name (str): - Required. Resource name of the model. 
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelsRequest(proto.Message): - r"""Request message for - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - - Attributes: - parent (str): - Required. Resource name of the project, from - which to list the models. - filter (str): - An expression for filtering the results of the request. - - - ``model_metadata`` - for existence of the case (e.g. - ``video_classification_model_metadata:*``). - - - ``dataset_id`` - for = or !=. Some examples of using the - filter are: - - - ``image_classification_model_metadata:*`` --> The model - has ``image_classification_model_metadata``. - - - ``dataset_id=5`` --> The model was created from a dataset - with ID 5. - page_size (int): - Requested page size. - page_token (str): - A token identifying a page of results for the server to - return Typically obtained via - [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token] - of the previous - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - filter: str = proto.Field( - proto.STRING, - number=3, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=6, - ) - - -class ListModelsResponse(proto.Message): - r"""Response message for - [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - - Attributes: - model (MutableSequence[google.cloud.automl_v1.types.Model]): - List of models in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - model: MutableSequence[gca_model.Model] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteModelRequest(proto.Message): - r"""Request message for - [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. - - Attributes: - name (str): - Required. Resource name of the model being - deleted. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class UpdateModelRequest(proto.Message): - r"""Request message for - [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] - - Attributes: - model (google.cloud.automl_v1.types.Model): - Required. The model which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the - resource. - """ - - model: gca_model.Model = proto.Field( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeployModelRequest(proto.Message): - r"""Request message for - [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - image_object_detection_model_deployment_metadata (google.cloud.automl_v1.types.ImageObjectDetectionModelDeploymentMetadata): - Model deployment metadata specific to Image - Object Detection. - - This field is a member of `oneof`_ ``model_deployment_metadata``. - image_classification_model_deployment_metadata (google.cloud.automl_v1.types.ImageClassificationModelDeploymentMetadata): - Model deployment metadata specific to Image - Classification. - - This field is a member of `oneof`_ ``model_deployment_metadata``. - name (str): - Required. Resource name of the model to - deploy. - """ - - image_object_detection_model_deployment_metadata: image.ImageObjectDetectionModelDeploymentMetadata = proto.Field( - proto.MESSAGE, - number=2, - oneof='model_deployment_metadata', - message=image.ImageObjectDetectionModelDeploymentMetadata, - ) - image_classification_model_deployment_metadata: image.ImageClassificationModelDeploymentMetadata = proto.Field( - proto.MESSAGE, - number=4, - oneof='model_deployment_metadata', - message=image.ImageClassificationModelDeploymentMetadata, - ) - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class UndeployModelRequest(proto.Message): - r"""Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. - - Attributes: - name (str): - Required. Resource name of the model to - undeploy. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ExportModelRequest(proto.Message): - r"""Request message for - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an error code - will be returned. - - Attributes: - name (str): - Required. The resource name of the model to - export. - output_config (google.cloud.automl_v1.types.ModelExportOutputConfig): - Required. The desired output location and - configuration. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - output_config: io.ModelExportOutputConfig = proto.Field( - proto.MESSAGE, - number=3, - message=io.ModelExportOutputConfig, - ) - - -class GetModelEvaluationRequest(proto.Message): - r"""Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. - - Attributes: - name (str): - Required. Resource name for the model - evaluation. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelEvaluationsRequest(proto.Message): - r"""Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - - Attributes: - parent (str): - Required. Resource name of the model to list - the model evaluations for. If modelId is set as - "-", this will list model evaluations from - across all models of the parent location. - filter (str): - Required. An expression for filtering the results of the - request. - - - ``annotation_spec_id`` - for =, != or existence. See - example below for the last. - - Some examples of using the filter are: - - - ``annotation_spec_id!=4`` --> The model evaluation was - done for annotation spec with ID different than 4. - - ``NOT annotation_spec_id:*`` --> The model evaluation was - done for aggregate of all annotation specs. - page_size (int): - Requested page size. - page_token (str): - A token identifying a page of results for the server to - return. 
Typically obtained via - [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token] - of the previous - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - filter: str = proto.Field( - proto.STRING, - number=3, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=6, - ) - - -class ListModelEvaluationsResponse(proto.Message): - r"""Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - - Attributes: - model_evaluation (MutableSequence[google.cloud.automl_v1.types.ModelEvaluation]): - List of model evaluations in the requested - page. - next_page_token (str): - A token to retrieve next page of results. Pass to the - [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token] - field of a new - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] - request to obtain that page. - """ - - @property - def raw_page(self): - return self - - model_evaluation: MutableSequence[gca_model_evaluation.ModelEvaluation] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model_evaluation.ModelEvaluation, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/text.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/text.py deleted file mode 100644 index 0773af68..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/text.py +++ /dev/null @@ -1,104 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import classification - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'TextClassificationDatasetMetadata', - 'TextClassificationModelMetadata', - 'TextExtractionDatasetMetadata', - 'TextExtractionModelMetadata', - 'TextSentimentDatasetMetadata', - 'TextSentimentModelMetadata', - }, -) - - -class TextClassificationDatasetMetadata(proto.Message): - r"""Dataset metadata for classification. - - Attributes: - classification_type (google.cloud.automl_v1.types.ClassificationType): - Required. Type of the classification problem. - """ - - classification_type: classification.ClassificationType = proto.Field( - proto.ENUM, - number=1, - enum=classification.ClassificationType, - ) - - -class TextClassificationModelMetadata(proto.Message): - r"""Model metadata that is specific to text classification. - - Attributes: - classification_type (google.cloud.automl_v1.types.ClassificationType): - Output only. Classification type of the - dataset used to train this model. 
- """ - - classification_type: classification.ClassificationType = proto.Field( - proto.ENUM, - number=3, - enum=classification.ClassificationType, - ) - - -class TextExtractionDatasetMetadata(proto.Message): - r"""Dataset metadata that is specific to text extraction - """ - - -class TextExtractionModelMetadata(proto.Message): - r"""Model metadata that is specific to text extraction. - """ - - -class TextSentimentDatasetMetadata(proto.Message): - r"""Dataset metadata for text sentiment. - - Attributes: - sentiment_max (int): - Required. A sentiment is expressed as an integer ordinal, - where higher value means a more positive sentiment. The - range of sentiments that will be used is between 0 and - sentiment_max (inclusive on both ends), and all the values - in the range must be represented in the dataset before a - model can be created. sentiment_max value must be between 1 - and 10 (inclusive). - """ - - sentiment_max: int = proto.Field( - proto.INT32, - number=1, - ) - - -class TextSentimentModelMetadata(proto.Message): - r"""Model metadata that is specific to text sentiment. - """ - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/text_extraction.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/text_extraction.py deleted file mode 100644 index 221420c7..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/text_extraction.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import text_segment as gca_text_segment - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'TextExtractionAnnotation', - 'TextExtractionEvaluationMetrics', - }, -) - - -class TextExtractionAnnotation(proto.Message): - r"""Annotation for identifying spans of text. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - text_segment (google.cloud.automl_v1.types.TextSegment): - An entity annotation will set this, which is - the part of the original text to which the - annotation pertains. - - This field is a member of `oneof`_ ``annotation``. - score (float): - Output only. A confidence estimate between - 0.0 and 1.0. A higher value means greater - confidence in correctness of the annotation. - """ - - text_segment: gca_text_segment.TextSegment = proto.Field( - proto.MESSAGE, - number=3, - oneof='annotation', - message=gca_text_segment.TextSegment, - ) - score: float = proto.Field( - proto.FLOAT, - number=1, - ) - - -class TextExtractionEvaluationMetrics(proto.Message): - r"""Model evaluation metrics for text extraction problems. - - Attributes: - au_prc (float): - Output only. The Area under precision recall - curve metric. 
- confidence_metrics_entries (MutableSequence[google.cloud.automl_v1.types.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]): - Output only. Metrics that have confidence - thresholds. Precision-recall curve can be - derived from it. - """ - - class ConfidenceMetricsEntry(proto.Message): - r"""Metrics for a single confidence threshold. - - Attributes: - confidence_threshold (float): - Output only. The confidence threshold value - used to compute the metrics. Only annotations - with score of at least this threshold are - considered to be ones the model would return. - recall (float): - Output only. Recall under the given - confidence threshold. - precision (float): - Output only. Precision under the given - confidence threshold. - f1_score (float): - Output only. The harmonic mean of recall and - precision. - """ - - confidence_threshold: float = proto.Field( - proto.FLOAT, - number=1, - ) - recall: float = proto.Field( - proto.FLOAT, - number=3, - ) - precision: float = proto.Field( - proto.FLOAT, - number=4, - ) - f1_score: float = proto.Field( - proto.FLOAT, - number=5, - ) - - au_prc: float = proto.Field( - proto.FLOAT, - number=1, - ) - confidence_metrics_entries: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=ConfidenceMetricsEntry, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/text_segment.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/text_segment.py deleted file mode 100644 index 3d1d4a11..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/text_segment.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'TextSegment', - }, -) - - -class TextSegment(proto.Message): - r"""A contiguous part of a text (string), assuming it has an - UTF-8 NFC encoding. - - Attributes: - content (str): - Output only. The content of the TextSegment. - start_offset (int): - Required. Zero-based character index of the - first character of the text segment (counting - characters from the beginning of the text). - end_offset (int): - Required. Zero-based character index of the first character - past the end of the text segment (counting character from - the beginning of the text). The character at the end_offset - is NOT included in the text segment. 
- """ - - content: str = proto.Field( - proto.STRING, - number=3, - ) - start_offset: int = proto.Field( - proto.INT64, - number=1, - ) - end_offset: int = proto.Field( - proto.INT64, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/text_sentiment.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/text_sentiment.py deleted file mode 100644 index 98289622..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/text_sentiment.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import classification - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'TextSentimentAnnotation', - 'TextSentimentEvaluationMetrics', - }, -) - - -class TextSentimentAnnotation(proto.Message): - r"""Contains annotation details specific to text sentiment. - - Attributes: - sentiment (int): - Output only. The sentiment with the semantic, as given to - the - [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] - when populating the dataset from which the model used for - the prediction had been trained. The sentiment values are - between 0 and - Dataset.text_sentiment_dataset_metadata.sentiment_max - (inclusive), with higher value meaning more positive - sentiment. They are completely relative, i.e. 0 means least - positive sentiment and sentiment_max means the most positive - from the sentiments present in the train data. Therefore - e.g. if train data had only negative sentiment, then - sentiment_max, would be still negative (although least - negative). The sentiment shouldn't be confused with "score" - or "magnitude" from the previous Natural Language Sentiment - Analysis API. - """ - - sentiment: int = proto.Field( - proto.INT32, - number=1, - ) - - -class TextSentimentEvaluationMetrics(proto.Message): - r"""Model evaluation metrics for text sentiment problems. - - Attributes: - precision (float): - Output only. Precision. - recall (float): - Output only. Recall. - f1_score (float): - Output only. The harmonic mean of recall and - precision. - mean_absolute_error (float): - Output only. Mean absolute error. Only set - for the overall model evaluation, not for - evaluation of a single annotation spec. - mean_squared_error (float): - Output only. Mean squared error. Only set for - the overall model evaluation, not for evaluation - of a single annotation spec. - linear_kappa (float): - Output only. Linear weighted kappa. Only set - for the overall model evaluation, not for - evaluation of a single annotation spec. - quadratic_kappa (float): - Output only. Quadratic weighted kappa. Only - set for the overall model evaluation, not for - evaluation of a single annotation spec. 
- confusion_matrix (google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfusionMatrix): - Output only. Confusion matrix of the - evaluation. Only set for the overall model - evaluation, not for evaluation of a single - annotation spec. - """ - - precision: float = proto.Field( - proto.FLOAT, - number=1, - ) - recall: float = proto.Field( - proto.FLOAT, - number=2, - ) - f1_score: float = proto.Field( - proto.FLOAT, - number=3, - ) - mean_absolute_error: float = proto.Field( - proto.FLOAT, - number=4, - ) - mean_squared_error: float = proto.Field( - proto.FLOAT, - number=5, - ) - linear_kappa: float = proto.Field( - proto.FLOAT, - number=6, - ) - quadratic_kappa: float = proto.Field( - proto.FLOAT, - number=7, - ) - confusion_matrix: classification.ClassificationEvaluationMetrics.ConfusionMatrix = proto.Field( - proto.MESSAGE, - number=8, - message=classification.ClassificationEvaluationMetrics.ConfusionMatrix, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/automl_v1/types/translation.py b/owl-bot-staging/v1/google/cloud/automl_v1/types/translation.py deleted file mode 100644 index 12810026..00000000 --- a/owl-bot-staging/v1/google/cloud/automl_v1/types/translation.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1.types import data_items - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1', - manifest={ - 'TranslationDatasetMetadata', - 'TranslationEvaluationMetrics', - 'TranslationModelMetadata', - 'TranslationAnnotation', - }, -) - - -class TranslationDatasetMetadata(proto.Message): - r"""Dataset metadata that is specific to translation. - - Attributes: - source_language_code (str): - Required. The BCP-47 language code of the - source language. - target_language_code (str): - Required. The BCP-47 language code of the - target language. - """ - - source_language_code: str = proto.Field( - proto.STRING, - number=1, - ) - target_language_code: str = proto.Field( - proto.STRING, - number=2, - ) - - -class TranslationEvaluationMetrics(proto.Message): - r"""Evaluation metrics for the dataset. - - Attributes: - bleu_score (float): - Output only. BLEU score. - base_bleu_score (float): - Output only. BLEU score for base model. - """ - - bleu_score: float = proto.Field( - proto.DOUBLE, - number=1, - ) - base_bleu_score: float = proto.Field( - proto.DOUBLE, - number=2, - ) - - -class TranslationModelMetadata(proto.Message): - r"""Model metadata that is specific to translation. - - Attributes: - base_model (str): - The resource name of the model to use as a baseline to train - the custom model. If unset, we use the default base model - provided by Google Translate. Format: - ``projects/{project_id}/locations/{location_id}/models/{model_id}`` - source_language_code (str): - Output only. 
Inferred from the dataset. - The source language (The BCP-47 language code) - that is used for training. - target_language_code (str): - Output only. The target language (The BCP-47 - language code) that is used for training. - """ - - base_model: str = proto.Field( - proto.STRING, - number=1, - ) - source_language_code: str = proto.Field( - proto.STRING, - number=2, - ) - target_language_code: str = proto.Field( - proto.STRING, - number=3, - ) - - -class TranslationAnnotation(proto.Message): - r"""Annotation details specific to translation. - - Attributes: - translated_content (google.cloud.automl_v1.types.TextSnippet): - Output only . The translated content. - """ - - translated_content: data_items.TextSnippet = proto.Field( - proto.MESSAGE, - number=1, - message=data_items.TextSnippet, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini deleted file mode 100644 index 574c5aed..00000000 --- a/owl-bot-staging/v1/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.7 -namespace_packages = True diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py deleted file mode 100644 index 42f7a58a..00000000 --- a/owl-bot-staging/v1/noxfile.py +++ /dev/null @@ -1,184 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -ALL_PYTHON = [ - "3.7", - "3.8", - "3.9", - "3.10", - "3.11", -] - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - -BLACK_VERSION = "black==22.3.0" -BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"] -DEFAULT_PYTHON_VERSION = "3.11" - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", - "blacken", - "lint", - "lint_setup_py", -] - -@nox.session(python=ALL_PYTHON) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/automl_v1/', - '--cov=tests/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=ALL_PYTHON) -def mypy(session): - """Run the type checker.""" - session.install( - 'mypy', - 'types-requests', - 'types-protobuf' - ) - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx==7.0.1", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def lint(session): - """Run linters. - - Returns a failure if the linters find linting errors or sufficiently - serious code quality issues. - """ - session.install("flake8", BLACK_VERSION) - session.run( - "black", - "--check", - *BLACK_PATHS, - ) - session.run("flake8", "google", "tests", "samples") - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def blacken(session): - """Run black. Format code to uniform standard.""" - session.install(BLACK_VERSION) - session.run( - "black", - *BLACK_PATHS, - ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def lint_setup_py(session): - """Verify that setup.py is valid (including RST check).""" - session.install("docutils", "pygments") - session.run("python", "setup.py", "check", "--restructuredtext", "--strict") diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_async.py deleted file mode 100644 index 781bfeff..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_async.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! 
-# -# Snippet for CreateDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_CreateDataset_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_create_dataset(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - dataset = automl_v1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1.CreateDatasetRequest( - parent="parent_value", - dataset=dataset, - ) - - # Make the request - operation = client.create_dataset(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_CreateDataset_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_sync.py deleted file mode 100644 index 55de89c2..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_dataset_sync.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_CreateDataset_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_create_dataset(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - dataset = automl_v1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1.CreateDatasetRequest( - parent="parent_value", - dataset=dataset, - ) - - # Make the request - operation = client.create_dataset(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_CreateDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_async.py deleted file mode 100644 index 6c5b56d9..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_CreateModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_create_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.CreateModelRequest( - parent="parent_value", - ) - - # Make the request - operation = client.create_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_CreateModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_sync.py deleted file mode 100644 index 38b2d1d9..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_create_model_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_CreateModel_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_create_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.CreateModelRequest( - parent="parent_value", - ) - - # Make the request - operation = client.create_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_CreateModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_async.py deleted file mode 100644 index 52a75977..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_DeleteDataset_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_delete_dataset(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.DeleteDatasetRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_dataset(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_DeleteDataset_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_sync.py deleted file mode 100644 index e24a7e8f..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_dataset_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_DeleteDataset_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_delete_dataset(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.DeleteDatasetRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_dataset(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_DeleteDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_async.py deleted file mode 100644 index ff01ec1e..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_DeleteModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_delete_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.DeleteModelRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_DeleteModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_sync.py deleted file mode 100644 index e4ff940c..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_delete_model_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_DeleteModel_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_delete_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.DeleteModelRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_DeleteModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_async.py deleted file mode 100644 index 5fdcf9eb..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeployModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_DeployModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_deploy_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.DeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.deploy_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_DeployModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_sync.py deleted file mode 100644 index 2fab8897..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_deploy_model_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeployModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_DeployModel_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_deploy_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.DeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.deploy_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_DeployModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_async.py deleted file mode 100644 index c5b0913e..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_async.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ExportData -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ExportData_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_export_data(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - output_config = automl_v1.OutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.ExportDataRequest( - name="name_value", - output_config=output_config, - ) - - # Make the request - operation = client.export_data(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_ExportData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_sync.py deleted file mode 100644 index e9687c7a..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_data_sync.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ExportData -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ExportData_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_export_data(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - output_config = automl_v1.OutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.ExportDataRequest( - name="name_value", - output_config=output_config, - ) - - # Make the request - operation = client.export_data(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_ExportData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_async.py deleted file mode 100644 index df0d73fb..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_async.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ExportModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ExportModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_export_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - output_config = automl_v1.ModelExportOutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.ExportModelRequest( - name="name_value", - output_config=output_config, - ) - - # Make the request - operation = client.export_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_ExportModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_sync.py deleted file mode 100644 index 19cca495..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_export_model_sync.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ExportModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ExportModel_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_export_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - output_config = automl_v1.ModelExportOutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.ExportModelRequest( - name="name_value", - output_config=output_config, - ) - - # Make the request - operation = client.export_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_ExportModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_async.py deleted file mode 100644 index ff09b999..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetAnnotationSpec -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_GetAnnotationSpec_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_get_annotation_spec(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.GetAnnotationSpecRequest( - name="name_value", - ) - - # Make the request - response = await client.get_annotation_spec(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_GetAnnotationSpec_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_sync.py deleted file mode 100644 index 2c75f200..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_annotation_spec_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetAnnotationSpec -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_GetAnnotationSpec_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_get_annotation_spec(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.GetAnnotationSpecRequest( - name="name_value", - ) - - # Make the request - response = client.get_annotation_spec(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_GetAnnotationSpec_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_async.py deleted file mode 100644 index 911401d8..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_GetDataset_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_get_dataset(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.GetDatasetRequest( - name="name_value", - ) - - # Make the request - response = await client.get_dataset(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_GetDataset_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_sync.py deleted file mode 100644 index 990253b5..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_dataset_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_GetDataset_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_get_dataset(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.GetDatasetRequest( - name="name_value", - ) - - # Make the request - response = client.get_dataset(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_GetDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_async.py deleted file mode 100644 index 2059f160..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_GetModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_get_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.GetModelRequest( - name="name_value", - ) - - # Make the request - response = await client.get_model(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_GetModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_async.py deleted file mode 100644 index 9758caa9..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetModelEvaluation -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_GetModelEvaluation_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_get_model_evaluation(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.GetModelEvaluationRequest( - name="name_value", - ) - - # Make the request - response = await client.get_model_evaluation(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_GetModelEvaluation_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_sync.py deleted file mode 100644 index 0e8000eb..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_evaluation_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetModelEvaluation -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_GetModelEvaluation_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_get_model_evaluation(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.GetModelEvaluationRequest( - name="name_value", - ) - - # Make the request - response = client.get_model_evaluation(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_GetModelEvaluation_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_sync.py deleted file mode 100644 index 692326b9..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_get_model_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_GetModel_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_get_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.GetModelRequest( - name="name_value", - ) - - # Make the request - response = client.get_model(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_GetModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_async.py deleted file mode 100644 index 6966d258..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_async.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ImportData -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ImportData_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_import_data(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - input_config = automl_v1.InputConfig() - input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] - - request = automl_v1.ImportDataRequest( - name="name_value", - input_config=input_config, - ) - - # Make the request - operation = client.import_data(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_ImportData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_sync.py deleted file mode 100644 index 05570793..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_import_data_sync.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ImportData -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ImportData_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_import_data(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - input_config = automl_v1.InputConfig() - input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] - - request = automl_v1.ImportDataRequest( - name="name_value", - input_config=input_config, - ) - - # Make the request - operation = client.import_data(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_ImportData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_async.py deleted file mode 100644 index 1f30e64d..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListDatasets -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ListDatasets_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_list_datasets(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.ListDatasetsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_datasets(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END automl_v1_generated_AutoMl_ListDatasets_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_sync.py deleted file mode 100644 index 443cf025..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_datasets_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListDatasets -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ListDatasets_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_list_datasets(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.ListDatasetsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_datasets(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END automl_v1_generated_AutoMl_ListDatasets_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_async.py deleted file mode 100644 index 2d6b409d..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_async.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListModelEvaluations -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ListModelEvaluations_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_list_model_evaluations(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.ListModelEvaluationsRequest( - parent="parent_value", - filter="filter_value", - ) - - # Make the request - page_result = client.list_model_evaluations(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END automl_v1_generated_AutoMl_ListModelEvaluations_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_sync.py deleted file mode 100644 index 4d829cba..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_model_evaluations_sync.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListModelEvaluations -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ListModelEvaluations_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_list_model_evaluations(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.ListModelEvaluationsRequest( - parent="parent_value", - filter="filter_value", - ) - - # Make the request - page_result = client.list_model_evaluations(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END automl_v1_generated_AutoMl_ListModelEvaluations_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_async.py deleted file mode 100644 index c6d25d99..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListModels -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ListModels_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_list_models(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.ListModelsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_models(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END automl_v1_generated_AutoMl_ListModels_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_sync.py deleted file mode 100644 index 5518b988..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_list_models_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListModels -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_ListModels_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_list_models(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.ListModelsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_models(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END automl_v1_generated_AutoMl_ListModels_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_async.py deleted file mode 100644 index 904728e9..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UndeployModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_UndeployModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_undeploy_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.UndeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.undeploy_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_UndeployModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_sync.py deleted file mode 100644 index 38f1566c..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_undeploy_model_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UndeployModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_UndeployModel_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_undeploy_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.UndeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.undeploy_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_UndeployModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_async.py deleted file mode 100644 index a49b5e3c..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_UpdateDataset_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_update_dataset(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - dataset = automl_v1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1.UpdateDatasetRequest( - dataset=dataset, - ) - - # Make the request - response = await client.update_dataset(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_UpdateDataset_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_sync.py deleted file mode 100644 index 8422cfae..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_dataset_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_UpdateDataset_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_update_dataset(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - dataset = automl_v1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1.UpdateDatasetRequest( - dataset=dataset, - ) - - # Make the request - response = client.update_dataset(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_UpdateDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_async.py deleted file mode 100644 index d3d96b92..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_async.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_UpdateModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_update_model(): - # Create a client - client = automl_v1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1.UpdateModelRequest( - ) - - # Make the request - response = await client.update_model(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_UpdateModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_sync.py deleted file mode 100644 index 8b379db9..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_auto_ml_update_model_sync.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_AutoMl_UpdateModel_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_update_model(): - # Create a client - client = automl_v1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1.UpdateModelRequest( - ) - - # Make the request - response = client.update_model(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_AutoMl_UpdateModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_async.py deleted file mode 100644 index 5aecf9df..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_async.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for BatchPredict -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_PredictionService_BatchPredict_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_batch_predict(): - # Create a client - client = automl_v1.PredictionServiceAsyncClient() - - # Initialize request argument(s) - input_config = automl_v1.BatchPredictInputConfig() - input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] - - output_config = automl_v1.BatchPredictOutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.BatchPredictRequest( - name="name_value", - input_config=input_config, - output_config=output_config, - ) - - # Make the request - operation = client.batch_predict(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1_generated_PredictionService_BatchPredict_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_sync.py deleted file mode 100644 index 2e45b20a..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_batch_predict_sync.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for BatchPredict -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_PredictionService_BatchPredict_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_batch_predict(): - # Create a client - client = automl_v1.PredictionServiceClient() - - # Initialize request argument(s) - input_config = automl_v1.BatchPredictInputConfig() - input_config.gcs_source.input_uris = ['input_uris_value1', 'input_uris_value2'] - - output_config = automl_v1.BatchPredictOutputConfig() - output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" - - request = automl_v1.BatchPredictRequest( - name="name_value", - input_config=input_config, - output_config=output_config, - ) - - # Make the request - operation = client.batch_predict(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1_generated_PredictionService_BatchPredict_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_async.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_async.py deleted file mode 100644 index 81f2f562..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for Predict -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_PredictionService_Predict_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -async def sample_predict(): - # Create a client - client = automl_v1.PredictionServiceAsyncClient() - - # Initialize request argument(s) - payload = automl_v1.ExamplePayload() - payload.image.image_bytes = b'image_bytes_blob' - - request = automl_v1.PredictRequest( - name="name_value", - payload=payload, - ) - - # Make the request - response = await client.predict(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_PredictionService_Predict_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_sync.py b/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_sync.py deleted file mode 100644 index 4f46921a..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/automl_v1_generated_prediction_service_predict_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for Predict -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1_generated_PredictionService_Predict_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1 - - -def sample_predict(): - # Create a client - client = automl_v1.PredictionServiceClient() - - # Initialize request argument(s) - payload = automl_v1.ExamplePayload() - payload.image.image_bytes = b'image_bytes_blob' - - request = automl_v1.PredictRequest( - name="name_value", - payload=payload, - ) - - # Make the request - response = client.predict(request=request) - - # Handle the response - print(response) - -# [END automl_v1_generated_PredictionService_Predict_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json b/owl-bot-staging/v1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json deleted file mode 100644 index df593972..00000000 --- a/owl-bot-staging/v1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json +++ /dev/null @@ -1,3339 +0,0 @@ -{ - "clientLibrary": { - "apis": [ - { - "id": "google.cloud.automl.v1", - "version": "v1" - } - ], - "language": "PYTHON", - "name": "google-cloud-automl", - "version": "0.1.0" - }, - "snippets": [ - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.create_dataset", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.CreateDataset", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "CreateDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.CreateDatasetRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "dataset", - "type": "google.cloud.automl_v1.types.Dataset" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_dataset" - }, - "description": "Sample for CreateDataset", - "file": "automl_v1_generated_auto_ml_create_dataset_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_CreateDataset_async", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_create_dataset_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.create_dataset", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.CreateDataset", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "CreateDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.CreateDatasetRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "dataset", - "type": 
"google.cloud.automl_v1.types.Dataset" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_dataset" - }, - "description": "Sample for CreateDataset", - "file": "automl_v1_generated_auto_ml_create_dataset_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_CreateDataset_sync", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_create_dataset_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.create_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.CreateModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "CreateModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.CreateModelRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "model", - "type": "google.cloud.automl_v1.types.Model" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_model" - }, - "description": "Sample for CreateModel", - "file": "automl_v1_generated_auto_ml_create_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_CreateModel_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_create_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.create_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.CreateModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "CreateModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.CreateModelRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "model", - "type": "google.cloud.automl_v1.types.Model" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": 
"google.api_core.operation.Operation", - "shortName": "create_model" - }, - "description": "Sample for CreateModel", - "file": "automl_v1_generated_auto_ml_create_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_CreateModel_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_create_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.delete_dataset", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.DeleteDataset", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeleteDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.DeleteDatasetRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_dataset" - }, - "description": "Sample for DeleteDataset", - "file": "automl_v1_generated_auto_ml_delete_dataset_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_DeleteDataset_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_delete_dataset_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.delete_dataset", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.DeleteDataset", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeleteDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.DeleteDatasetRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_dataset" - }, - "description": "Sample for DeleteDataset", - "file": "automl_v1_generated_auto_ml_delete_dataset_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_DeleteDataset_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": 
"SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_delete_dataset_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.delete_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.DeleteModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeleteModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.DeleteModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_model" - }, - "description": "Sample for DeleteModel", - "file": "automl_v1_generated_auto_ml_delete_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_DeleteModel_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_delete_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.delete_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.DeleteModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeleteModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.DeleteModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_model" - }, - "description": "Sample for DeleteModel", - "file": "automl_v1_generated_auto_ml_delete_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_DeleteModel_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_delete_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - 
"fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.deploy_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.DeployModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeployModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.DeployModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "deploy_model" - }, - "description": "Sample for DeployModel", - "file": "automl_v1_generated_auto_ml_deploy_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_DeployModel_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_deploy_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.deploy_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.DeployModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeployModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.DeployModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "deploy_model" - }, - "description": "Sample for DeployModel", - "file": "automl_v1_generated_auto_ml_deploy_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_DeployModel_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_deploy_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.export_data", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ExportData", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ExportData" - }, - "parameters": [ - { - "name": "request", - "type": 
"google.cloud.automl_v1.types.ExportDataRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1.types.OutputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "export_data" - }, - "description": "Sample for ExportData", - "file": "automl_v1_generated_auto_ml_export_data_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ExportData_async", - "segments": [ - { - "end": 59, - "start": 27, - "type": "FULL" - }, - { - "end": 59, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 56, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 60, - "start": 57, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_export_data_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.export_data", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ExportData", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ExportData" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ExportDataRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1.types.OutputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "export_data" - }, - "description": "Sample for ExportData", - "file": "automl_v1_generated_auto_ml_export_data_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ExportData_sync", - "segments": [ - { - "end": 59, - "start": 27, - "type": "FULL" - }, - { - "end": 59, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 56, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 60, - "start": 57, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_export_data_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.export_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ExportModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ExportModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ExportModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1.types.ModelExportOutputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - 
"name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "export_model" - }, - "description": "Sample for ExportModel", - "file": "automl_v1_generated_auto_ml_export_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ExportModel_async", - "segments": [ - { - "end": 59, - "start": 27, - "type": "FULL" - }, - { - "end": 59, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 56, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 60, - "start": 57, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_export_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.export_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ExportModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ExportModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ExportModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1.types.ModelExportOutputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "export_model" - }, - "description": "Sample for ExportModel", - "file": "automl_v1_generated_auto_ml_export_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ExportModel_sync", - "segments": [ - { - "end": 59, - "start": 27, - "type": "FULL" - }, - { - "end": 59, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 56, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 60, - "start": 57, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_export_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.get_annotation_spec", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.GetAnnotationSpec", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetAnnotationSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.GetAnnotationSpecRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.AnnotationSpec", - "shortName": "get_annotation_spec" - }, - "description": "Sample for GetAnnotationSpec", - "file": 
"automl_v1_generated_auto_ml_get_annotation_spec_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_GetAnnotationSpec_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_get_annotation_spec_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.get_annotation_spec", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.GetAnnotationSpec", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetAnnotationSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.GetAnnotationSpecRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.AnnotationSpec", - "shortName": "get_annotation_spec" - }, - "description": "Sample for GetAnnotationSpec", - "file": "automl_v1_generated_auto_ml_get_annotation_spec_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_GetAnnotationSpec_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_get_annotation_spec_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.get_dataset", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.GetDataset", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.GetDatasetRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.Dataset", - "shortName": "get_dataset" - }, - "description": "Sample for GetDataset", - "file": "automl_v1_generated_auto_ml_get_dataset_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_GetDataset_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - 
"end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_get_dataset_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.get_dataset", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.GetDataset", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.GetDatasetRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.Dataset", - "shortName": "get_dataset" - }, - "description": "Sample for GetDataset", - "file": "automl_v1_generated_auto_ml_get_dataset_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_GetDataset_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_get_dataset_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.get_model_evaluation", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.GetModelEvaluation", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetModelEvaluation" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.GetModelEvaluationRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.ModelEvaluation", - "shortName": "get_model_evaluation" - }, - "description": "Sample for GetModelEvaluation", - "file": "automl_v1_generated_auto_ml_get_model_evaluation_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_GetModelEvaluation_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_get_model_evaluation_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", 
- "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.get_model_evaluation", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.GetModelEvaluation", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetModelEvaluation" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.GetModelEvaluationRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.ModelEvaluation", - "shortName": "get_model_evaluation" - }, - "description": "Sample for GetModelEvaluation", - "file": "automl_v1_generated_auto_ml_get_model_evaluation_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_GetModelEvaluation_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_get_model_evaluation_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.get_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.GetModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.GetModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.Model", - "shortName": "get_model" - }, - "description": "Sample for GetModel", - "file": "automl_v1_generated_auto_ml_get_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_GetModel_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_get_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.get_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.GetModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.GetModelRequest" - }, - { - 
"name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.Model", - "shortName": "get_model" - }, - "description": "Sample for GetModel", - "file": "automl_v1_generated_auto_ml_get_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_GetModel_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_get_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.import_data", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ImportData", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ImportData" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ImportDataRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "input_config", - "type": "google.cloud.automl_v1.types.InputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "import_data" - }, - "description": "Sample for ImportData", - "file": "automl_v1_generated_auto_ml_import_data_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ImportData_async", - "segments": [ - { - "end": 59, - "start": 27, - "type": "FULL" - }, - { - "end": 59, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 56, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 60, - "start": 57, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_import_data_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.import_data", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ImportData", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ImportData" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ImportDataRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "input_config", - "type": "google.cloud.automl_v1.types.InputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - 
"shortName": "import_data" - }, - "description": "Sample for ImportData", - "file": "automl_v1_generated_auto_ml_import_data_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ImportData_sync", - "segments": [ - { - "end": 59, - "start": 27, - "type": "FULL" - }, - { - "end": 59, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 56, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 60, - "start": 57, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_import_data_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.list_datasets", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ListDatasets", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListDatasets" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ListDatasetsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsAsyncPager", - "shortName": "list_datasets" - }, - "description": "Sample for ListDatasets", - "file": "automl_v1_generated_auto_ml_list_datasets_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ListDatasets_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_list_datasets_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.list_datasets", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ListDatasets", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListDatasets" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ListDatasetsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsPager", - "shortName": "list_datasets" - }, - "description": "Sample for ListDatasets", - "file": "automl_v1_generated_auto_ml_list_datasets_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ListDatasets_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": 
"SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_list_datasets_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.list_model_evaluations", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ListModelEvaluations", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListModelEvaluations" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ListModelEvaluationsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "filter", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager", - "shortName": "list_model_evaluations" - }, - "description": "Sample for ListModelEvaluations", - "file": "automl_v1_generated_auto_ml_list_model_evaluations_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ListModelEvaluations_async", - "segments": [ - { - "end": 53, - "start": 27, - "type": "FULL" - }, - { - "end": 53, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 54, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_list_model_evaluations_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.list_model_evaluations", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ListModelEvaluations", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListModelEvaluations" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ListModelEvaluationsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "filter", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsPager", - "shortName": "list_model_evaluations" - }, - "description": "Sample for ListModelEvaluations", - "file": "automl_v1_generated_auto_ml_list_model_evaluations_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ListModelEvaluations_sync", - "segments": [ - { - "end": 53, - "start": 27, - "type": "FULL" - }, - { - "end": 53, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - 
"type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 54, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_list_model_evaluations_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.list_models", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ListModels", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListModels" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ListModelsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelsAsyncPager", - "shortName": "list_models" - }, - "description": "Sample for ListModels", - "file": "automl_v1_generated_auto_ml_list_models_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ListModels_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_list_models_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.list_models", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.ListModels", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListModels" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.ListModelsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.services.auto_ml.pagers.ListModelsPager", - "shortName": "list_models" - }, - "description": "Sample for ListModels", - "file": "automl_v1_generated_auto_ml_list_models_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_ListModels_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_list_models_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": 
"AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.undeploy_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.UndeployModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UndeployModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.UndeployModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "undeploy_model" - }, - "description": "Sample for UndeployModel", - "file": "automl_v1_generated_auto_ml_undeploy_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_UndeployModel_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_undeploy_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.undeploy_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.UndeployModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UndeployModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.UndeployModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "undeploy_model" - }, - "description": "Sample for UndeployModel", - "file": "automl_v1_generated_auto_ml_undeploy_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_UndeployModel_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_undeploy_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.update_dataset", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.UpdateDataset", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UpdateDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.UpdateDatasetRequest" 
- }, - { - "name": "dataset", - "type": "google.cloud.automl_v1.types.Dataset" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.Dataset", - "shortName": "update_dataset" - }, - "description": "Sample for UpdateDataset", - "file": "automl_v1_generated_auto_ml_update_dataset_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_UpdateDataset_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_update_dataset_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.update_dataset", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.UpdateDataset", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UpdateDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.UpdateDatasetRequest" - }, - { - "name": "dataset", - "type": "google.cloud.automl_v1.types.Dataset" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.Dataset", - "shortName": "update_dataset" - }, - "description": "Sample for UpdateDataset", - "file": "automl_v1_generated_auto_ml_update_dataset_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_UpdateDataset_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_update_dataset_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlAsyncClient.update_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.UpdateModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UpdateModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.UpdateModelRequest" - }, - { - "name": "model", - "type": "google.cloud.automl_v1.types.Model" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - 
"name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.Model", - "shortName": "update_model" - }, - "description": "Sample for UpdateModel", - "file": "automl_v1_generated_auto_ml_update_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_UpdateModel_async", - "segments": [ - { - "end": 50, - "start": 27, - "type": "FULL" - }, - { - "end": 50, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 47, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 51, - "start": 48, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_update_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1.AutoMlClient.update_model", - "method": { - "fullName": "google.cloud.automl.v1.AutoMl.UpdateModel", - "service": { - "fullName": "google.cloud.automl.v1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UpdateModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.UpdateModelRequest" - }, - { - "name": "model", - "type": "google.cloud.automl_v1.types.Model" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.Model", - "shortName": "update_model" - }, - "description": "Sample for UpdateModel", - "file": "automl_v1_generated_auto_ml_update_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_AutoMl_UpdateModel_sync", - "segments": [ - { - "end": 50, - "start": 27, - "type": "FULL" - }, - { - "end": 50, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 47, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 51, - "start": 48, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_auto_ml_update_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" - }, - "fullName": "google.cloud.automl_v1.PredictionServiceAsyncClient.batch_predict", - "method": { - "fullName": "google.cloud.automl.v1.PredictionService.BatchPredict", - "service": { - "fullName": "google.cloud.automl.v1.PredictionService", - "shortName": "PredictionService" - }, - "shortName": "BatchPredict" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.BatchPredictRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "input_config", - "type": "google.cloud.automl_v1.types.BatchPredictInputConfig" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1.types.BatchPredictOutputConfig" - }, - { - "name": "params", - "type": "MutableMapping[str, str]" - }, - 
{ - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "batch_predict" - }, - "description": "Sample for BatchPredict", - "file": "automl_v1_generated_prediction_service_batch_predict_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_PredictionService_BatchPredict_async", - "segments": [ - { - "end": 63, - "start": 27, - "type": "FULL" - }, - { - "end": 63, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 53, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 60, - "start": 54, - "type": "REQUEST_EXECUTION" - }, - { - "end": 64, - "start": 61, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_prediction_service_batch_predict_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.PredictionServiceClient", - "shortName": "PredictionServiceClient" - }, - "fullName": "google.cloud.automl_v1.PredictionServiceClient.batch_predict", - "method": { - "fullName": "google.cloud.automl.v1.PredictionService.BatchPredict", - "service": { - "fullName": "google.cloud.automl.v1.PredictionService", - "shortName": "PredictionService" - }, - "shortName": "BatchPredict" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.BatchPredictRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "input_config", - "type": "google.cloud.automl_v1.types.BatchPredictInputConfig" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1.types.BatchPredictOutputConfig" - }, - { - "name": "params", - "type": "MutableMapping[str, str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "batch_predict" - }, - "description": "Sample for BatchPredict", - "file": "automl_v1_generated_prediction_service_batch_predict_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_PredictionService_BatchPredict_sync", - "segments": [ - { - "end": 63, - "start": 27, - "type": "FULL" - }, - { - "end": 63, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 53, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 60, - "start": 54, - "type": "REQUEST_EXECUTION" - }, - { - "end": 64, - "start": 61, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_prediction_service_batch_predict_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" - }, - "fullName": "google.cloud.automl_v1.PredictionServiceAsyncClient.predict", - "method": { - "fullName": "google.cloud.automl.v1.PredictionService.Predict", - "service": { - "fullName": "google.cloud.automl.v1.PredictionService", - "shortName": "PredictionService" - }, - "shortName": "Predict" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.PredictRequest" - }, - { - "name": "name", 
- "type": "str" - }, - { - "name": "payload", - "type": "google.cloud.automl_v1.types.ExamplePayload" - }, - { - "name": "params", - "type": "MutableMapping[str, str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.PredictResponse", - "shortName": "predict" - }, - "description": "Sample for Predict", - "file": "automl_v1_generated_prediction_service_predict_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_PredictionService_Predict_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_prediction_service_predict_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1.PredictionServiceClient", - "shortName": "PredictionServiceClient" - }, - "fullName": "google.cloud.automl_v1.PredictionServiceClient.predict", - "method": { - "fullName": "google.cloud.automl.v1.PredictionService.Predict", - "service": { - "fullName": "google.cloud.automl.v1.PredictionService", - "shortName": "PredictionService" - }, - "shortName": "Predict" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1.types.PredictRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "payload", - "type": "google.cloud.automl_v1.types.ExamplePayload" - }, - { - "name": "params", - "type": "MutableMapping[str, str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1.types.PredictResponse", - "shortName": "predict" - }, - "description": "Sample for Predict", - "file": "automl_v1_generated_prediction_service_predict_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1_generated_PredictionService_Predict_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1_generated_prediction_service_predict_sync.py" - } - ] -} diff --git a/owl-bot-staging/v1/scripts/fixup_automl_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_automl_v1_keywords.py deleted file mode 100644 index ef6140ff..00000000 --- a/owl-bot-staging/v1/scripts/fixup_automl_v1_keywords.py +++ /dev/null @@ -1,195 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class automlCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'batch_predict': ('name', 'input_config', 'output_config', 'params', ), - 'create_dataset': ('parent', 'dataset', ), - 'create_model': ('parent', 'model', ), - 'delete_dataset': ('name', ), - 'delete_model': ('name', ), - 'deploy_model': ('name', 'image_object_detection_model_deployment_metadata', 'image_classification_model_deployment_metadata', ), - 'export_data': ('name', 'output_config', ), - 'export_model': ('name', 'output_config', ), - 'get_annotation_spec': ('name', ), - 'get_dataset': ('name', ), - 'get_model': ('name', ), - 'get_model_evaluation': ('name', ), - 'import_data': ('name', 'input_config', ), - 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', ), - 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', ), - 'list_models': ('parent', 'filter', 'page_size', 'page_token', ), - 'predict': ('name', 'payload', 'params', ), - 'undeploy_model': ('name', ), - 'update_dataset': ('dataset', 'update_mask', ), - 'update_model': ('model', 'update_mask', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. 
- for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=automlCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the automl client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py deleted file mode 100644 index 95b4c8d1..00000000 --- a/owl-bot-staging/v1/setup.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
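
The fixup script whose deletion ends above walks a source tree with libcst and rewrites flattened, positional client calls into the request-dict form, leaving the control parameters (retry, timeout, metadata) as ordinary keyword arguments. A hedged sketch of that rewrite for the predict method, assuming the script's directory is on sys.path so its transformer can be imported; the names and values below are placeholders, not part of this change:

    import libcst as cst

    # automlCallTransformer is defined in the deleted fixup_automl_v1_keywords.py;
    # importing it this way assumes that script's directory is on sys.path.
    from fixup_automl_v1_keywords import automlCallTransformer

    source = "client.predict(name, payload, params, retry=retry)\n"
    rewritten = cst.parse_module(source).visit(automlCallTransformer())
    print(rewritten.code)
    # Expected shape (exact whitespace may differ):
    # client.predict(request={'name': name, 'payload': payload, 'params': params}, retry=retry)
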
-# -import io -import os - -import setuptools # type: ignore - -package_root = os.path.abspath(os.path.dirname(__file__)) - -name = 'google-cloud-automl' - - -description = "Google Cloud Automl API client library" - -version = {} -with open(os.path.join(package_root, 'google/cloud/automl/gapic_version.py')) as fp: - exec(fp.read(), version) -version = version["__version__"] - -if version[0] == "0": - release_status = "Development Status :: 4 - Beta" -else: - release_status = "Development Status :: 5 - Production/Stable" - -dependencies = [ - "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", - "proto-plus >= 1.22.0, <2.0.0dev", - "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", - "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", -] -url = "https://github.com/googleapis/python-automl" - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, "README.rst") -with io.open(readme_filename, encoding="utf-8") as readme_file: - readme = readme_file.read() - -packages = [ - package - for package in setuptools.PEP420PackageFinder.find() - if package.startswith("google") -] - -namespaces = ["google", "google.cloud"] - -setuptools.setup( - name=name, - version=version, - description=description, - long_description=readme, - author="Google LLC", - author_email="googleapis-packages@google.com", - license="Apache 2.0", - url=url, - classifiers=[ - release_status, - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Operating System :: OS Independent", - "Topic :: Internet", - ], - platforms="Posix; MacOS X; Windows", - packages=packages, - python_requires=">=3.7", - namespace_packages=namespaces, - install_requires=dependencies, - include_package_data=True, - zip_safe=False, -) diff --git a/owl-bot-staging/v1/testing/constraints-3.10.txt b/owl-bot-staging/v1/testing/constraints-3.10.txt deleted file mode 100644 index ed7f9aed..00000000 --- a/owl-bot-staging/v1/testing/constraints-3.10.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/v1/testing/constraints-3.11.txt b/owl-bot-staging/v1/testing/constraints-3.11.txt deleted file mode 100644 index ed7f9aed..00000000 --- a/owl-bot-staging/v1/testing/constraints-3.11.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/v1/testing/constraints-3.12.txt b/owl-bot-staging/v1/testing/constraints-3.12.txt deleted file mode 100644 index ed7f9aed..00000000 --- a/owl-bot-staging/v1/testing/constraints-3.12.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. 
-google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/v1/testing/constraints-3.7.txt b/owl-bot-staging/v1/testing/constraints-3.7.txt deleted file mode 100644 index 6c44adfe..00000000 --- a/owl-bot-staging/v1/testing/constraints-3.7.txt +++ /dev/null @@ -1,9 +0,0 @@ -# This constraints file is used to check that lower bounds -# are correct in setup.py -# List all library dependencies and extras in this file. -# Pin the version to the lower bound. -# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", -# Then this file should have google-cloud-foo==1.14.0 -google-api-core==1.34.0 -proto-plus==1.22.0 -protobuf==3.19.5 diff --git a/owl-bot-staging/v1/testing/constraints-3.8.txt b/owl-bot-staging/v1/testing/constraints-3.8.txt deleted file mode 100644 index ed7f9aed..00000000 --- a/owl-bot-staging/v1/testing/constraints-3.8.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/v1/testing/constraints-3.9.txt b/owl-bot-staging/v1/testing/constraints-3.9.txt deleted file mode 100644 index ed7f9aed..00000000 --- a/owl-bot-staging/v1/testing/constraints-3.9.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py deleted file mode 100644 index 1b4db446..00000000 --- a/owl-bot-staging/v1/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py deleted file mode 100644 index 1b4db446..00000000 --- a/owl-bot-staging/v1/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py deleted file mode 100644 index 1b4db446..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/automl_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/automl_v1/__init__.py deleted file mode 100644 index 1b4db446..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/automl_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_auto_ml.py b/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_auto_ml.py deleted file mode 100644 index 494cd187..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_auto_ml.py +++ /dev/null @@ -1,10997 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock # pragma: NO COVER -except ImportError: # pragma: NO COVER - import mock - -import grpc -from grpc.experimental import aio -from collections.abc import Iterable -from google.protobuf import json_format -import json -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule -from proto.marshal.rules import wrappers -from requests import Response -from requests import Request, PreparedRequest -from requests.sessions import Session -from google.protobuf import json_format - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.automl_v1.services.auto_ml import AutoMlAsyncClient -from google.cloud.automl_v1.services.auto_ml import AutoMlClient -from google.cloud.automl_v1.services.auto_ml import pagers -from google.cloud.automl_v1.services.auto_ml import transports -from google.cloud.automl_v1.types import annotation_spec -from google.cloud.automl_v1.types import classification -from google.cloud.automl_v1.types import dataset -from google.cloud.automl_v1.types import dataset as gca_dataset -from google.cloud.automl_v1.types import detection -from google.cloud.automl_v1.types import image -from google.cloud.automl_v1.types import io -from google.cloud.automl_v1.types import model -from google.cloud.automl_v1.types import model as gca_model -from google.cloud.automl_v1.types import model_evaluation -from google.cloud.automl_v1.types import operations -from google.cloud.automl_v1.types import service -from google.cloud.automl_v1.types import text -from google.cloud.automl_v1.types import text_extraction -from google.cloud.automl_v1.types import text_sentiment -from google.cloud.automl_v1.types import translation -from google.longrunning import operations_pb2 # type: ignore -from google.oauth2 import service_account -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert AutoMlClient._get_default_mtls_endpoint(None) is None - assert AutoMlClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert AutoMlClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert AutoMlClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert AutoMlClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert AutoMlClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class,transport_name", [ - (AutoMlClient, "grpc"), - (AutoMlAsyncClient, "grpc_asyncio"), - (AutoMlClient, "rest"), -]) -def test_auto_ml_client_from_service_account_info(client_class, transport_name): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info, transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://automl.googleapis.com' - ) - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.AutoMlGrpcTransport, "grpc"), - (transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), - (transports.AutoMlRestTransport, "rest"), -]) -def test_auto_ml_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class,transport_name", [ - (AutoMlClient, "grpc"), - (AutoMlAsyncClient, "grpc_asyncio"), - (AutoMlClient, "rest"), -]) -def test_auto_ml_client_from_service_account_file(client_class, transport_name): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://automl.googleapis.com' - ) - - -def 
test_auto_ml_client_get_transport_class(): - transport = AutoMlClient.get_transport_class() - available_transports = [ - transports.AutoMlGrpcTransport, - transports.AutoMlRestTransport, - ] - assert transport in available_transports - - transport = AutoMlClient.get_transport_class("grpc") - assert transport == transports.AutoMlGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), - (AutoMlClient, transports.AutoMlRestTransport, "rest"), -]) -@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) -@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) -def test_auto_ml_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(AutoMlClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(AutoMlClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class(transport=transport_name) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class(transport=transport_name) - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - # Check the case api_endpoint is provided - options = client_options.ClientOptions(api_audience="https://language.googleapis.com") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience="https://language.googleapis.com" - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", "true"), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", "false"), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", "false"), - (AutoMlClient, transports.AutoMlRestTransport, "rest", "true"), - (AutoMlClient, transports.AutoMlRestTransport, "rest", "false"), -]) -@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) -@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_auto_ml_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class", [ - AutoMlClient, AutoMlAsyncClient -]) -@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) -@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) -def test_auto_ml_client_get_mtls_endpoint_and_cert_source(client_class): - mock_client_cert_source = mock.Mock() - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source == mock_client_cert_source - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - mock_client_cert_source = mock.Mock() - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source == mock_client_cert_source - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), - (AutoMlClient, transports.AutoMlRestTransport, "rest"), -]) -def test_auto_ml_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", grpc_helpers), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), - (AutoMlClient, transports.AutoMlRestTransport, "rest", None), -]) -def test_auto_ml_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -def test_auto_ml_client_client_options_from_dict(): - with mock.patch('google.cloud.automl_v1.services.auto_ml.transports.AutoMlGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = AutoMlClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", grpc_helpers), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), -]) -def test_auto_ml_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # test that the credentials from file are saved and used as the credentials. 
- with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel" - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - file_creds = ga_credentials.AnonymousCredentials() - load_creds.return_value = (file_creds, None) - adc.return_value = (creds, None) - client = client_class(client_options=options, transport=transport_name) - create_channel.assert_called_with( - "automl.googleapis.com:443", - credentials=file_creds, - credentials_file=None, - quota_project_id=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=None, - default_host="automl.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("request_type", [ - service.CreateDatasetRequest, - dict, -]) -def test_create_dataset(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - client.create_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateDatasetRequest() - -@pytest.mark.asyncio -async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=service.CreateDatasetRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_dataset_async_from_dict(): - await test_create_dataset_async(request_type=dict) - - -def test_create_dataset_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.CreateDatasetRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_dataset_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.CreateDatasetRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_create_dataset_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_dataset( - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].dataset - mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) - assert arg == mock_val - - -def test_create_dataset_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_dataset( - service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - -@pytest.mark.asyncio -async def test_create_dataset_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_dataset( - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].dataset - mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_create_dataset_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_dataset( - service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetDatasetRequest, - dict, -]) -def test_get_dataset(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - ) - response = client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -def test_get_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - client.get_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetDatasetRequest() - -@pytest.mark.asyncio -async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=service.GetDatasetRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - )) - response = await client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_dataset_async_from_dict(): - await test_get_dataset_async(request_type=dict) - - -def test_get_dataset_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetDatasetRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - call.return_value = dataset.Dataset() - client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_dataset_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetDatasetRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) - await client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_dataset_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_dataset_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_dataset( - service.GetDatasetRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_dataset_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_dataset_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_dataset( - service.GetDatasetRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListDatasetsRequest, - dict, -]) -def test_list_datasets(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListDatasetsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListDatasetsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDatasetsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_datasets_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - client.list_datasets() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListDatasetsRequest() - -@pytest.mark.asyncio -async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=service.ListDatasetsRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListDatasetsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDatasetsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_datasets_async_from_dict(): - await test_list_datasets_async(request_type=dict) - - -def test_list_datasets_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = service.ListDatasetsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - call.return_value = service.ListDatasetsResponse() - client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_datasets_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListDatasetsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse()) - await client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_datasets_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListDatasetsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_datasets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_datasets_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_datasets( - service.ListDatasetsRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_datasets_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListDatasetsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.list_datasets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_datasets_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_datasets( - service.ListDatasetsRequest(), - parent='parent_value', - ) - - -def test_list_datasets_pager(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_datasets(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in results) -def test_list_datasets_pages(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - pages = list(client.list_datasets(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_datasets_async_pager(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_datasets(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_datasets_async_pages(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_datasets(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize("request_type", [ - service.UpdateDatasetRequest, - dict, -]) -def test_update_dataset(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - ) - response = client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateDatasetRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -def test_update_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - client.update_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateDatasetRequest() - -@pytest.mark.asyncio -async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=service.UpdateDatasetRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - )) - response = await client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_dataset_async_from_dict(): - await test_update_dataset_async(request_type=dict) - - -def test_update_dataset_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UpdateDatasetRequest() - - request.dataset.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - call.return_value = gca_dataset.Dataset() - client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'dataset.name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_dataset_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UpdateDatasetRequest() - - request.dataset.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - await client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'dataset.name=name_value', - ) in kw['metadata'] - - -def test_update_dataset_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_dataset( - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].dataset - mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_dataset_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_dataset( - service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - -@pytest.mark.asyncio -async def test_update_dataset_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.update_dataset( - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].dataset - mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_update_dataset_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_dataset( - service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.parametrize("request_type", [ - service.DeleteDatasetRequest, - dict, -]) -def test_delete_dataset(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - client.delete_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteDatasetRequest() - -@pytest.mark.asyncio -async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=service.DeleteDatasetRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_dataset_async_from_dict(): - await test_delete_dataset_async(request_type=dict) - - -def test_delete_dataset_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeleteDatasetRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_dataset_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeleteDatasetRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_delete_dataset_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_dataset_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_dataset( - service.DeleteDatasetRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_delete_dataset_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_delete_dataset_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_dataset( - service.DeleteDatasetRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ImportDataRequest, - dict, -]) -def test_import_data(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ImportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_import_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - client.import_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ImportDataRequest() - -@pytest.mark.asyncio -async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=service.ImportDataRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ImportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_import_data_async_from_dict(): - await test_import_data_async(request_type=dict) - - -def test_import_data_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ImportDataRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_import_data_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ImportDataRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_import_data_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.import_data( - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].input_config - mock_val = io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) - assert arg == mock_val - - -def test_import_data_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.import_data( - service.ImportDataRequest(), - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - -@pytest.mark.asyncio -async def test_import_data_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.import_data( - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].input_config - mock_val = io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_import_data_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.import_data( - service.ImportDataRequest(), - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - - -@pytest.mark.parametrize("request_type", [ - service.ExportDataRequest, - dict, -]) -def test_export_data(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - client.export_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportDataRequest() - -@pytest.mark.asyncio -async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=service.ExportDataRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_data_async_from_dict(): - await test_export_data_async(request_type=dict) - - -def test_export_data_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ExportDataRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_data_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = service.ExportDataRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_export_data_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_data( - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - - -def test_export_data_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_data( - service.ExportDataRequest(), - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - -@pytest.mark.asyncio -async def test_export_data_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_data( - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_export_data_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_data( - service.ExportDataRequest(), - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetAnnotationSpecRequest, - dict, -]) -def test_get_annotation_spec(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - example_count=1396, - ) - response = client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.example_count == 1396 - - -def test_get_annotation_spec_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - client.get_annotation_spec() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetAnnotationSpecRequest() - -@pytest.mark.asyncio -async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=service.GetAnnotationSpecRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - example_count=1396, - )) - response = await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.example_count == 1396 - - -@pytest.mark.asyncio -async def test_get_annotation_spec_async_from_dict(): - await test_get_annotation_spec_async(request_type=dict) - - -def test_get_annotation_spec_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetAnnotationSpecRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = annotation_spec.AnnotationSpec() - client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_annotation_spec_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetAnnotationSpecRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_annotation_spec_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_annotation_spec_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_annotation_spec( - service.GetAnnotationSpecRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_annotation_spec( - service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.CreateModelRequest, - dict, -]) -def test_create_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - client.create_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateModelRequest() - -@pytest.mark.asyncio -async def test_create_model_async(transport: str = 'grpc_asyncio', request_type=service.CreateModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_model_async_from_dict(): - await test_create_model_async(request_type=dict) - - -def test_create_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.CreateModelRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.CreateModelRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_create_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_model( - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model - mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) - assert arg == mock_val - - -def test_create_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_model( - service.CreateModelRequest(), - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - -@pytest.mark.asyncio -async def test_create_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_model( - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model - mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_create_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_model( - service.CreateModelRequest(), - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetModelRequest, - dict, -]) -def test_get_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model( - name='name_value', - display_name='display_name_value', - dataset_id='dataset_id_value', - deployment_state=model.Model.DeploymentState.DEPLOYED, - etag='etag_value', - ) - response = client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.dataset_id == 'dataset_id_value' - assert response.deployment_state == model.Model.DeploymentState.DEPLOYED - assert response.etag == 'etag_value' - - -def test_get_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - client.get_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelRequest() - -@pytest.mark.asyncio -async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=service.GetModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model.Model( - name='name_value', - display_name='display_name_value', - dataset_id='dataset_id_value', - deployment_state=model.Model.DeploymentState.DEPLOYED, - etag='etag_value', - )) - response = await client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.dataset_id == 'dataset_id_value' - assert response.deployment_state == model.Model.DeploymentState.DEPLOYED - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_model_async_from_dict(): - await test_get_model_async(request_type=dict) - - -def test_get_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = model.Model() - client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - await client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model( - service.GetModelRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model( - service.GetModelRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListModelsRequest, - dict, -]) -def test_list_models(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListModelsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListModelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_models_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - client.list_models() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListModelsRequest() - -@pytest.mark.asyncio -async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=service.ListModelsRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListModelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_models_async_from_dict(): - await test_list_models_async(request_type=dict) - - -def test_list_models_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListModelsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - call.return_value = service.ListModelsResponse() - client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_models_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListModelsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse()) - await client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_models_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListModelsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_models( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_models_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_models( - service.ListModelsRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_models_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListModelsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_models( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_models_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_models( - service.ListModelsRequest(), - parent='parent_value', - ) - - -def test_list_models_pager(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - service.ListModelsResponse( - model=[], - next_page_token='def', - ), - service.ListModelsResponse( - model=[ - model.Model(), - ], - next_page_token='ghi', - ), - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_models(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, model.Model) - for i in results) -def test_list_models_pages(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - service.ListModelsResponse( - model=[], - next_page_token='def', - ), - service.ListModelsResponse( - model=[ - model.Model(), - ], - next_page_token='ghi', - ), - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - pages = list(client.list_models(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_models_async_pager(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - service.ListModelsResponse( - model=[], - next_page_token='def', - ), - service.ListModelsResponse( - model=[ - model.Model(), - ], - next_page_token='ghi', - ), - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_models(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model.Model) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_models_async_pages(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - service.ListModelsResponse( - model=[], - next_page_token='def', - ), - service.ListModelsResponse( - model=[ - model.Model(), - ], - next_page_token='ghi', - ), - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_models(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize("request_type", [ - service.DeleteModelRequest, - dict, -]) -def test_delete_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - client.delete_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteModelRequest() - -@pytest.mark.asyncio -async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=service.DeleteModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_model_async_from_dict(): - await test_delete_model_async(request_type=dict) - - -def test_delete_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeleteModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeleteModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_delete_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_model( - service.DeleteModelRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_delete_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_delete_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_model( - service.DeleteModelRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.UpdateModelRequest, - dict, -]) -def test_update_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model( - name='name_value', - display_name='display_name_value', - dataset_id='dataset_id_value', - deployment_state=gca_model.Model.DeploymentState.DEPLOYED, - etag='etag_value', - ) - response = client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.dataset_id == 'dataset_id_value' - assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED - assert response.etag == 'etag_value' - - -def test_update_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - client.update_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateModelRequest() - -@pytest.mark.asyncio -async def test_update_model_async(transport: str = 'grpc_asyncio', request_type=service.UpdateModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model( - name='name_value', - display_name='display_name_value', - dataset_id='dataset_id_value', - deployment_state=gca_model.Model.DeploymentState.DEPLOYED, - etag='etag_value', - )) - response = await client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.dataset_id == 'dataset_id_value' - assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_model_async_from_dict(): - await test_update_model_async(request_type=dict) - - -def test_update_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UpdateModelRequest() - - request.model.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - call.return_value = gca_model.Model() - client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model.name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UpdateModelRequest() - - request.model.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) - await client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model.name=name_value', - ) in kw['metadata'] - - -def test_update_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_model( - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].model - mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_model( - service.UpdateModelRequest(), - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - -@pytest.mark.asyncio -async def test_update_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_model.Model() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_model( - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].model - mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_update_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_model( - service.UpdateModelRequest(), - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.parametrize("request_type", [ - service.DeployModelRequest, - dict, -]) -def test_deploy_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_deploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - client.deploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeployModelRequest() - -@pytest.mark.asyncio -async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=service.DeployModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_deploy_model_async_from_dict(): - await test_deploy_model_async(request_type=dict) - - -def test_deploy_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeployModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_deploy_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeployModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_deploy_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.deploy_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_deploy_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.deploy_model( - service.DeployModelRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_deploy_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.deploy_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_deploy_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.deploy_model( - service.DeployModelRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.UndeployModelRequest, - dict, -]) -def test_undeploy_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.UndeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_undeploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - client.undeploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.UndeployModelRequest() - -@pytest.mark.asyncio -async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=service.UndeployModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.UndeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undeploy_model_async_from_dict(): - await test_undeploy_model_async(request_type=dict) - - -def test_undeploy_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UndeployModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_undeploy_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UndeployModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_undeploy_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.undeploy_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_undeploy_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.undeploy_model( - service.UndeployModelRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_undeploy_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.undeploy_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_undeploy_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.undeploy_model( - service.UndeployModelRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ExportModelRequest, - dict, -]) -def test_export_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - client.export_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportModelRequest() - -@pytest.mark.asyncio -async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=service.ExportModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_model_async_from_dict(): - await test_export_model_async(request_type=dict) - - -def test_export_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ExportModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ExportModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_export_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_model( - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - - -def test_export_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_model( - service.ExportModelRequest(), - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - -@pytest.mark.asyncio -async def test_export_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_model( - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_export_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.export_model( - service.ExportModelRequest(), - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetModelEvaluationRequest, - dict, -]) -def test_get_model_evaluation(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation( - name='name_value', - annotation_spec_id='annotation_spec_id_value', - display_name='display_name_value', - evaluated_example_count=2446, - ) - response = client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelEvaluationRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation.ModelEvaluation) - assert response.name == 'name_value' - assert response.annotation_spec_id == 'annotation_spec_id_value' - assert response.display_name == 'display_name_value' - assert response.evaluated_example_count == 2446 - - -def test_get_model_evaluation_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - client.get_model_evaluation() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelEvaluationRequest() - -@pytest.mark.asyncio -async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=service.GetModelEvaluationRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation( - name='name_value', - annotation_spec_id='annotation_spec_id_value', - display_name='display_name_value', - evaluated_example_count=2446, - )) - response = await client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelEvaluationRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, model_evaluation.ModelEvaluation) - assert response.name == 'name_value' - assert response.annotation_spec_id == 'annotation_spec_id_value' - assert response.display_name == 'display_name_value' - assert response.evaluated_example_count == 2446 - - -@pytest.mark.asyncio -async def test_get_model_evaluation_async_from_dict(): - await test_get_model_evaluation_async(request_type=dict) - - -def test_get_model_evaluation_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetModelEvaluationRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - call.return_value = model_evaluation.ModelEvaluation() - client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_evaluation_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetModelEvaluationRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) - await client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_model_evaluation_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model_evaluation( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_evaluation_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_model_evaluation( - service.GetModelEvaluationRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_model_evaluation_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model_evaluation( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_model_evaluation_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model_evaluation( - service.GetModelEvaluationRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListModelEvaluationsRequest, - dict, -]) -def test_list_model_evaluations(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListModelEvaluationsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListModelEvaluationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_evaluations_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - client.list_model_evaluations() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListModelEvaluationsRequest() - -@pytest.mark.asyncio -async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=service.ListModelEvaluationsRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListModelEvaluationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_from_dict(): - await test_list_model_evaluations_async(request_type=dict) - - -def test_list_model_evaluations_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListModelEvaluationsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - call.return_value = service.ListModelEvaluationsResponse() - client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_model_evaluations_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListModelEvaluationsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse()) - await client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_model_evaluations_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListModelEvaluationsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_model_evaluations( - parent='parent_value', - filter='filter_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].filter - mock_val = 'filter_value' - assert arg == mock_val - - -def test_list_model_evaluations_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_model_evaluations( - service.ListModelEvaluationsRequest(), - parent='parent_value', - filter='filter_value', - ) - -@pytest.mark.asyncio -async def test_list_model_evaluations_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListModelEvaluationsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_model_evaluations( - parent='parent_value', - filter='filter_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].filter - mock_val = 'filter_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_model_evaluations_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_model_evaluations( - service.ListModelEvaluationsRequest(), - parent='parent_value', - filter='filter_value', - ) - - -def test_list_model_evaluations_pager(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[], - next_page_token='def', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_model_evaluations(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in results) -def test_list_model_evaluations_pages(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[], - next_page_token='def', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - pages = list(client.list_model_evaluations(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_pager(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[], - next_page_token='def', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_evaluations(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_pages(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[], - next_page_token='def', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_model_evaluations(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - service.CreateDatasetRequest, - dict, -]) -def test_create_dataset_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request_init["dataset"] = {'translation_dataset_metadata': {'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_dataset_metadata': {'classification_type': 1}, 'text_classification_dataset_metadata': {'classification_type': 1}, 'image_object_detection_dataset_metadata': {}, 'text_extraction_dataset_metadata': {}, 'text_sentiment_dataset_metadata': {'sentiment_max': 1404}, 'name': 'name_value', 'display_name': 'display_name_value', 'description': 'description_value', 'example_count': 1396, 'create_time': {'seconds': 751, 'nanos': 543}, 'etag': 'etag_value', 'labels': {}} - # The version of a generated dependency at test runtime may differ from the version used during generation. 
- # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = service.CreateDatasetRequest.meta.fields["dataset"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["dataset"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["dataset"][field])): - del request_init["dataset"][field][i][subfield] - else: - del request_init["dataset"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.create_dataset(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_create_dataset_rest_required_fields(request_type=service.CreateDatasetRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.create_dataset(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_create_dataset_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.create_dataset._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("parent", "dataset", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_dataset_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_create_dataset") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_create_dataset") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.CreateDatasetRequest.pb(service.CreateDatasetRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.CreateDatasetRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_dataset_rest_bad_request(transport: str = 'rest', request_type=service.CreateDatasetRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_dataset(request) - - -def test_create_dataset_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.create_dataset(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{parent=projects/*/locations/*}/datasets" % client.transport._host, args[1]) - - -def test_create_dataset_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_dataset( - service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - -def test_create_dataset_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetDatasetRequest, - dict, -]) -def test_get_dataset_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_dataset(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -def test_get_dataset_rest_required_fields(request_type=service.GetDatasetRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = dataset.Dataset() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_dataset(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_dataset_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.get_dataset._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_dataset_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_get_dataset") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_dataset") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.GetDatasetRequest.pb(service.GetDatasetRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = dataset.Dataset.to_json(dataset.Dataset()) - - request = service.GetDatasetRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = dataset.Dataset() - - client.get_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_dataset_rest_bad_request(transport: str = 'rest', request_type=service.GetDatasetRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_dataset(request) - - -def test_get_dataset_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = dataset.Dataset() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_dataset(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) - - -def test_get_dataset_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_dataset( - service.GetDatasetRequest(), - name='name_value', - ) - - -def test_get_dataset_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListDatasetsRequest, - dict, -]) -def test_list_datasets_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListDatasetsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListDatasetsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_datasets(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListDatasetsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_datasets_rest_required_fields(request_type=service.ListDatasetsRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_datasets._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_datasets._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = service.ListDatasetsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = service.ListDatasetsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_datasets(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_datasets_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.list_datasets._get_unset_required_fields({}) - assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_datasets_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_list_datasets") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_datasets") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ListDatasetsRequest.pb(service.ListDatasetsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = service.ListDatasetsResponse.to_json(service.ListDatasetsResponse()) - - request = service.ListDatasetsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = service.ListDatasetsResponse() - - client.list_datasets(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_datasets_rest_bad_request(transport: str = 'rest', request_type=service.ListDatasetsRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_datasets(request) - - -def test_list_datasets_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListDatasetsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListDatasetsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_datasets(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{parent=projects/*/locations/*}/datasets" % client.transport._host, args[1]) - - -def test_list_datasets_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_datasets( - service.ListDatasetsRequest(), - parent='parent_value', - ) - - -def test_list_datasets_rest_pager(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(service.ListDatasetsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - pager = client.list_datasets(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in results) - - pages = list(client.list_datasets(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - service.UpdateDatasetRequest, - dict, -]) -def test_update_dataset_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} - request_init["dataset"] = {'translation_dataset_metadata': {'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_dataset_metadata': {'classification_type': 1}, 'text_classification_dataset_metadata': {'classification_type': 1}, 'image_object_detection_dataset_metadata': {}, 'text_extraction_dataset_metadata': {}, 'text_sentiment_dataset_metadata': {'sentiment_max': 1404}, 'name': 'projects/sample1/locations/sample2/datasets/sample3', 'display_name': 'display_name_value', 'description': 'description_value', 'example_count': 1396, 'create_time': {'seconds': 751, 'nanos': 543}, 'etag': 'etag_value', 'labels': {}} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = service.UpdateDatasetRequest.meta.fields["dataset"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["dataset"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["dataset"][field])): - del request_init["dataset"][field][i][subfield] - else: - del request_init["dataset"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.update_dataset(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -def test_update_dataset_rest_required_fields(request_type=service.UpdateDatasetRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_dataset._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = gca_dataset.Dataset() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "patch", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gca_dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.update_dataset(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_update_dataset_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.update_dataset._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask", )) & set(("dataset", "updateMask", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_dataset_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_update_dataset") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_update_dataset") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.UpdateDatasetRequest.pb(service.UpdateDatasetRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = gca_dataset.Dataset.to_json(gca_dataset.Dataset()) - - request = service.UpdateDatasetRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = gca_dataset.Dataset() - - client.update_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_dataset_rest_bad_request(transport: str = 'rest', request_type=service.UpdateDatasetRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_dataset(request) - - -def test_update_dataset_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_dataset.Dataset() - - # get arguments that satisfy an http rule for this method - sample_request = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} - - # get truthy value for each flattened field - mock_args = dict( - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.update_dataset(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{dataset.name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) - - -def test_update_dataset_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_dataset( - service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_update_dataset_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.DeleteDatasetRequest, - dict, -]) -def test_delete_dataset_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.delete_dataset(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_delete_dataset_rest_required_fields(request_type=service.DeleteDatasetRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "delete", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.delete_dataset(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_delete_dataset_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.delete_dataset._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_dataset_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_delete_dataset") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_delete_dataset") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.DeleteDatasetRequest.pb(service.DeleteDatasetRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.DeleteDatasetRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.delete_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_delete_dataset_rest_bad_request(transport: str = 'rest', request_type=service.DeleteDatasetRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_dataset(request) - - -def test_delete_dataset_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.delete_dataset(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) - - -def test_delete_dataset_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_dataset( - service.DeleteDatasetRequest(), - name='name_value', - ) - - -def test_delete_dataset_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ImportDataRequest, - dict, -]) -def test_import_data_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.import_data(request) - - # Establish that the response is the type that we expect. 
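# Every *_rest happy-path test above fakes the wire format the same way: serialize the
# expected proto to JSON, plant the bytes on a requests.Response, and let the mocked
# Session.request hand it back. A self-contained sketch of just that round trip
# (protobuf, googleapis-common-protos and requests only; names are illustrative):
from google.longrunning import operations_pb2
from google.protobuf import json_format
from requests import Response

return_value = operations_pb2.Operation(name='operations/spam')

response_value = Response()
response_value.status_code = 200
response_value._content = json_format.MessageToJson(return_value).encode('UTF-8')

# What the client-side deserialization recovers from the faked body.
parsed = json_format.Parse(response_value._content.decode('UTF-8'), operations_pb2.Operation())
assert parsed.name == 'operations/spam'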
- assert response.operation.name == "operations/spam" - - -def test_import_data_rest_required_fields(request_type=service.ImportDataRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).import_data._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).import_data._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
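# The "verify fields with default values are dropped" step above leans on protobuf's
# JSON printer omitting default-valued fields. A minimal sketch of that behaviour with
# a stock message type (illustrative only; the generated test round-trips the request proto):
import json
from google.longrunning import operations_pb2
from google.protobuf import json_format

msg = operations_pb2.Operation(name='name_value', done=False)
jsonified = json.loads(json_format.MessageToJson(msg))

assert jsonified == {'name': 'name_value'}  # 'done' held its default value, so it was dropped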
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.import_data(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_import_data_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.import_data._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "inputConfig", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_import_data_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_import_data") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_import_data") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ImportDataRequest.pb(service.ImportDataRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.ImportDataRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.import_data(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_import_data_rest_bad_request(transport: str = 'rest', request_type=service.ImportDataRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.import_data(request) - - -def test_import_data_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.import_data(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/datasets/*}:importData" % client.transport._host, args[1]) - - -def test_import_data_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.import_data( - service.ImportDataRequest(), - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - - -def test_import_data_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ExportDataRequest, - dict, -]) -def test_export_data_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.export_data(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_export_data_rest_required_fields(request_type=service.ExportDataRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_data._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_data._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.export_data(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_export_data_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.export_data._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "outputConfig", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_export_data_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_export_data") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_export_data") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ExportDataRequest.pb(service.ExportDataRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.ExportDataRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.export_data(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_export_data_rest_bad_request(transport: str = 'rest', request_type=service.ExportDataRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.export_data(request) - - -def test_export_data_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
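# The *_rest_interceptors tests above check wiring only: the pre_* hook must run before
# the request and the post_* hook on the way back, each exactly once. A toy sketch of
# the same mock.patch.object pattern, with an invented Interceptor class standing in for
# transports.AutoMlRestInterceptor (purely illustrative, not the generated API):
from unittest import mock

class Interceptor:
    def pre_call(self, request):
        return request
    def post_call(self, response):
        return response

def call_with_hooks(interceptor, request):
    request = interceptor.pre_call(request)
    response = {'echo': request}          # stands in for the HTTP round trip
    return interceptor.post_call(response)

with mock.patch.object(Interceptor, 'pre_call') as pre, \
        mock.patch.object(Interceptor, 'post_call') as post:
    pre.assert_not_called()
    post.assert_not_called()
    pre.return_value = {'name': 'spam'}
    post.return_value = {'done': True}

    assert call_with_hooks(Interceptor(), {'name': 'spam'}) == {'done': True}

    pre.assert_called_once()
    post.assert_called_once()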
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.export_data(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/datasets/*}:exportData" % client.transport._host, args[1]) - - -def test_export_data_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_data( - service.ExportDataRequest(), - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -def test_export_data_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetAnnotationSpecRequest, - dict, -]) -def test_get_annotation_spec_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - example_count=1396, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = annotation_spec.AnnotationSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_annotation_spec(request) - - # Establish that the response is the type that we expect. 
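# The *_rest_flattened tests above verify the flattened call hit the right URL by
# matching it against the http rule with path_template.validate. A sketch of that check
# in isolation (assuming google-api-core's path_template; the host value is illustrative):
from google.api_core import path_template

host = 'https://automl.googleapis.com'
url = host + '/v1/projects/sample1/locations/sample2/datasets/sample3:exportData'
assert path_template.validate('%s/v1/{name=projects/*/locations/*/datasets/*}:exportData' % host, url)
assert not path_template.validate('%s/v1/{name=projects/*/locations/*/models/*}' % host, url)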
- assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.example_count == 1396 - - -def test_get_annotation_spec_rest_required_fields(request_type=service.GetAnnotationSpecRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_annotation_spec._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_annotation_spec._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = annotation_spec.AnnotationSpec() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = annotation_spec.AnnotationSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_annotation_spec(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_annotation_spec_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.get_annotation_spec._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_annotation_spec_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_get_annotation_spec") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_annotation_spec") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.GetAnnotationSpecRequest.pb(service.GetAnnotationSpecRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = annotation_spec.AnnotationSpec.to_json(annotation_spec.AnnotationSpec()) - - request = service.GetAnnotationSpecRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = annotation_spec.AnnotationSpec() - - client.get_annotation_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_annotation_spec_rest_bad_request(transport: str = 'rest', request_type=service.GetAnnotationSpecRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_annotation_spec(request) - - -def test_get_annotation_spec_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = annotation_spec.AnnotationSpec() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = annotation_spec.AnnotationSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_annotation_spec(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}" % client.transport._host, args[1]) - - -def test_get_annotation_spec_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_annotation_spec( - service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -def test_get_annotation_spec_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.CreateModelRequest, - dict, -]) -def test_create_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request_init["model"] = {'translation_model_metadata': {'base_model': 'base_model_value', 'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_model_metadata': {'base_model_id': 'base_model_id_value', 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881, 'stop_reason': 'stop_reason_value', 'model_type': 'model_type_value', 'node_qps': 0.857, 'node_count': 1070}, 'text_classification_model_metadata': {'classification_type': 1}, 'image_object_detection_model_metadata': {'model_type': 'model_type_value', 'node_count': 1070, 'node_qps': 0.857, 'stop_reason': 'stop_reason_value', 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881}, 'text_extraction_model_metadata': {}, 'text_sentiment_model_metadata': {}, 'name': 'name_value', 'display_name': 'display_name_value', 'dataset_id': 'dataset_id_value', 'create_time': {'seconds': 751, 'nanos': 543}, 'update_time': {}, 'deployment_state': 1, 'etag': 'etag_value', 'labels': {}} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = service.CreateModelRequest.meta.fields["model"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["model"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["model"][field])): - del request_init["model"][field][i][subfield] - else: - del request_init["model"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.create_model(request) - - # Establish that the response is the type that we expect. 
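# The pruning loop above exists because the protobuf/proto-plus dependency installed at
# test time may be older than the one the sample request was generated against, so any
# nested key the runtime does not know must be deleted before the request is built.
# A plain-data sketch of that idea (field names invented for illustration):
runtime_nested_fields = {('translation_model_metadata', 'base_model')}
request_init = {'model': {'translation_model_metadata': {'base_model': 'b', 'newer_field': 'x'}}}

subfields_not_in_runtime = []
for field, value in request_init['model'].items():
    if isinstance(value, dict):
        for subfield in value:
            if (field, subfield) not in runtime_nested_fields:
                subfields_not_in_runtime.append((field, subfield))

for field, subfield in subfields_not_in_runtime:
    del request_init['model'][field][subfield]

assert request_init == {'model': {'translation_model_metadata': {'base_model': 'b'}}}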
- assert response.operation.name == "operations/spam" - - -def test_create_model_rest_required_fields(request_type=service.CreateModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.create_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_create_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.create_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("parent", "model", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_create_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_create_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.CreateModelRequest.pb(service.CreateModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.CreateModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_model_rest_bad_request(transport: str = 'rest', request_type=service.CreateModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_model(request) - - -def test_create_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
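# The expected_params assertions above read the query parameters straight off the
# recorded mock call. A minimal sketch of that capture with a bare Session (requests
# and unittest.mock only; the URL is illustrative):
from unittest import mock
import requests

session = requests.Session()
with mock.patch.object(requests.Session, 'request') as req:
    req.return_value = requests.Response()
    session.request('GET', 'https://example.invalid/v1/sample_method',
                    params=[('$alt', 'json;enum-encoding=int')])
    assert req.call_args.kwargs['params'] == [('$alt', 'json;enum-encoding=int')]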
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.create_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{parent=projects/*/locations/*}/models" % client.transport._host, args[1]) - - -def test_create_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_model( - service.CreateModelRequest(), - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - - -def test_create_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetModelRequest, - dict, -]) -def test_get_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = model.Model( - name='name_value', - display_name='display_name_value', - dataset_id='dataset_id_value', - deployment_state=model.Model.DeploymentState.DEPLOYED, - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = model.Model.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_model(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.dataset_id == 'dataset_id_value' - assert response.deployment_state == model.Model.DeploymentState.DEPLOYED - assert response.etag == 'etag_value' - - -def test_get_model_rest_required_fields(request_type=service.GetModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = model.Model() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = model.Model.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.get_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_get_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.GetModelRequest.pb(service.GetModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = model.Model.to_json(model.Model()) - - request = service.GetModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = model.Model() - - client.get_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_model_rest_bad_request(transport: str = 'rest', request_type=service.GetModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_model(request) - - -def test_get_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = model.Model() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = model.Model.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}" % client.transport._host, args[1]) - - -def test_get_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model( - service.GetModelRequest(), - name='name_value', - ) - - -def test_get_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListModelsRequest, - dict, -]) -def test_list_models_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListModelsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListModelsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_models(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListModelsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_models_rest_required_fields(request_type=service.ListModelsRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_models._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_models._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = service.ListModelsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = service.ListModelsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_models(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_models_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.list_models._get_unset_required_fields({}) - assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_models_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_list_models") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_models") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ListModelsRequest.pb(service.ListModelsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = service.ListModelsResponse.to_json(service.ListModelsResponse()) - - request = service.ListModelsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = service.ListModelsResponse() - - client.list_models(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_models_rest_bad_request(transport: str = 'rest', request_type=service.ListModelsRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_models(request) - - -def test_list_models_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListModelsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListModelsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_models(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{parent=projects/*/locations/*}/models" % client.transport._host, args[1]) - - -def test_list_models_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_models( - service.ListModelsRequest(), - parent='parent_value', - ) - - -def test_list_models_rest_pager(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - service.ListModelsResponse( - model=[], - next_page_token='def', - ), - service.ListModelsResponse( - model=[ - model.Model(), - ], - next_page_token='ghi', - ), - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(service.ListModelsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - pager = client.list_models(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, model.Model) - for i in results) - - pages = list(client.list_models(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - service.DeleteModelRequest, - dict, -]) -def test_delete_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.delete_model(request) - - # Establish that the response is the type that we expect. 
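# The pager test above stitches several canned pages together by giving the mocked
# request a side_effect list: one Response per HTTP call, until an empty next_page_token
# ends iteration. A hand-rolled sketch of that stitching (plain dicts instead of
# ListModelsResponse, and no client involved; purely illustrative):
import json
from unittest import mock
from requests import Response

pages = [
    {'model': [{'name': 'm1'}, {'name': 'm2'}], 'nextPageToken': 'abc'},
    {'model': [{'name': 'm3'}], 'nextPageToken': ''},
]
canned = []
for page in pages:
    resp = Response()
    resp.status_code = 200
    resp._content = json.dumps(page).encode('UTF-8')
    canned.append(resp)

session = mock.Mock()
session.request = mock.Mock(side_effect=canned)

results = []
while True:
    body = json.loads(session.request('GET', 'https://example.invalid/v1/models').content)
    results.extend(body['model'])
    if not body.get('nextPageToken'):
        break

assert [m['name'] for m in results] == ['m1', 'm2', 'm3']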
- assert response.operation.name == "operations/spam" - - -def test_delete_model_rest_required_fields(request_type=service.DeleteModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "delete", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.delete_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_delete_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.delete_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_delete_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_delete_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.DeleteModelRequest.pb(service.DeleteModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.DeleteModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.delete_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_delete_model_rest_bad_request(transport: str = 'rest', request_type=service.DeleteModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_model(request) - - -def test_delete_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.delete_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}" % client.transport._host, args[1]) - - -def test_delete_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_model( - service.DeleteModelRequest(), - name='name_value', - ) - - -def test_delete_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.UpdateModelRequest, - dict, -]) -def test_update_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'model': {'name': 'projects/sample1/locations/sample2/models/sample3'}} - request_init["model"] = {'translation_model_metadata': {'base_model': 'base_model_value', 'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_model_metadata': {'base_model_id': 'base_model_id_value', 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881, 'stop_reason': 'stop_reason_value', 'model_type': 'model_type_value', 'node_qps': 0.857, 'node_count': 1070}, 'text_classification_model_metadata': {'classification_type': 1}, 'image_object_detection_model_metadata': {'model_type': 'model_type_value', 'node_count': 1070, 'node_qps': 0.857, 'stop_reason': 'stop_reason_value', 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881}, 'text_extraction_model_metadata': {}, 'text_sentiment_model_metadata': {}, 'name': 'projects/sample1/locations/sample2/models/sample3', 'display_name': 'display_name_value', 'dataset_id': 'dataset_id_value', 'create_time': {'seconds': 751, 'nanos': 543}, 'update_time': {}, 'deployment_state': 1, 'etag': 'etag_value', 'labels': {}} - # The version of a generated dependency at test runtime may differ from the version used during generation. 
- # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = service.UpdateModelRequest.meta.fields["model"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["model"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["model"][field])): - del request_init["model"][field][i][subfield] - else: - del request_init["model"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_model.Model( - name='name_value', - display_name='display_name_value', - dataset_id='dataset_id_value', - deployment_state=gca_model.Model.DeploymentState.DEPLOYED, - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_model.Model.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.update_model(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.dataset_id == 'dataset_id_value' - assert response.deployment_state == gca_model.Model.DeploymentState.DEPLOYED - assert response.etag == 'etag_value' - - -def test_update_model_rest_required_fields(request_type=service.UpdateModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_model._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = gca_model.Model() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "patch", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gca_model.Model.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.update_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_update_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.update_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask", )) & set(("model", "updateMask", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_update_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_update_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.UpdateModelRequest.pb(service.UpdateModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = gca_model.Model.to_json(gca_model.Model()) - - request = service.UpdateModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = gca_model.Model() - - client.update_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_model_rest_bad_request(transport: str = 'rest', request_type=service.UpdateModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'model': {'name': 'projects/sample1/locations/sample2/models/sample3'}} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_model(request) - - -def test_update_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_model.Model() - - # get arguments that satisfy an http rule for this method - sample_request = {'model': {'name': 'projects/sample1/locations/sample2/models/sample3'}} - - # get truthy value for each flattened field - mock_args = dict( - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_model.Model.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.update_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{model.name=projects/*/locations/*/models/*}" % client.transport._host, args[1]) - - -def test_update_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_model( - service.UpdateModelRequest(), - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_update_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.DeployModelRequest, - dict, -]) -def test_deploy_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.deploy_model(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_deploy_model_rest_required_fields(request_type=service.DeployModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).deploy_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).deploy_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.deploy_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_deploy_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.deploy_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_deploy_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_deploy_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_deploy_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.DeployModelRequest.pb(service.DeployModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.DeployModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.deploy_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_deploy_model_rest_bad_request(transport: str = 'rest', request_type=service.DeployModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.deploy_model(request) - - -def test_deploy_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.deploy_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}:deploy" % client.transport._host, args[1]) - - -def test_deploy_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.deploy_model( - service.DeployModelRequest(), - name='name_value', - ) - - -def test_deploy_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.UndeployModelRequest, - dict, -]) -def test_undeploy_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.undeploy_model(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_undeploy_model_rest_required_fields(request_type=service.UndeployModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).undeploy_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).undeploy_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.undeploy_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_undeploy_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.undeploy_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_undeploy_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_undeploy_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_undeploy_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.UndeployModelRequest.pb(service.UndeployModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.UndeployModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.undeploy_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_undeploy_model_rest_bad_request(transport: str = 'rest', request_type=service.UndeployModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.undeploy_model(request) - - -def test_undeploy_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.undeploy_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}:undeploy" % client.transport._host, args[1]) - - -def test_undeploy_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.undeploy_model( - service.UndeployModelRequest(), - name='name_value', - ) - - -def test_undeploy_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ExportModelRequest, - dict, -]) -def test_export_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.export_model(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_export_model_rest_required_fields(request_type=service.ExportModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.export_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_export_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.export_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "outputConfig", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_export_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_export_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_export_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ExportModelRequest.pb(service.ExportModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.ExportModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.export_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_export_model_rest_bad_request(transport: str = 'rest', request_type=service.ExportModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.export_model(request) - - -def test_export_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.export_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}:export" % client.transport._host, args[1]) - - -def test_export_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_model( - service.ExportModelRequest(), - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -def test_export_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetModelEvaluationRequest, - dict, -]) -def test_get_model_evaluation_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = model_evaluation.ModelEvaluation( - name='name_value', - annotation_spec_id='annotation_spec_id_value', - display_name='display_name_value', - evaluated_example_count=2446, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = model_evaluation.ModelEvaluation.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_model_evaluation(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, model_evaluation.ModelEvaluation) - assert response.name == 'name_value' - assert response.annotation_spec_id == 'annotation_spec_id_value' - assert response.display_name == 'display_name_value' - assert response.evaluated_example_count == 2446 - - -def test_get_model_evaluation_rest_required_fields(request_type=service.GetModelEvaluationRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model_evaluation._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model_evaluation._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = model_evaluation.ModelEvaluation() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = model_evaluation.ModelEvaluation.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_model_evaluation(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_model_evaluation_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.get_model_evaluation._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_model_evaluation_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_get_model_evaluation") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_model_evaluation") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.GetModelEvaluationRequest.pb(service.GetModelEvaluationRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = model_evaluation.ModelEvaluation.to_json(model_evaluation.ModelEvaluation()) - - request = service.GetModelEvaluationRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = model_evaluation.ModelEvaluation() - - client.get_model_evaluation(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_model_evaluation_rest_bad_request(transport: str = 'rest', request_type=service.GetModelEvaluationRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_model_evaluation(request) - - -def test_get_model_evaluation_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = model_evaluation.ModelEvaluation() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = model_evaluation.ModelEvaluation.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_model_evaluation(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}" % client.transport._host, args[1]) - - -def test_get_model_evaluation_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model_evaluation( - service.GetModelEvaluationRequest(), - name='name_value', - ) - - -def test_get_model_evaluation_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListModelEvaluationsRequest, - dict, -]) -def test_list_model_evaluations_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = service.ListModelEvaluationsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListModelEvaluationsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_model_evaluations(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_evaluations_rest_required_fields(request_type=service.ListModelEvaluationsRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["filter"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - assert "filter" not in jsonified_request - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_model_evaluations._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "filter" in jsonified_request - assert jsonified_request["filter"] == request_init["filter"] - - jsonified_request["parent"] = 'parent_value' - jsonified_request["filter"] = 'filter_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_model_evaluations._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - assert "filter" in jsonified_request - assert jsonified_request["filter"] == 'filter_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = service.ListModelEvaluationsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = service.ListModelEvaluationsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_model_evaluations(request) - - expected_params = [ - ( - "filter", - "", - ), - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_model_evaluations_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.list_model_evaluations._get_unset_required_fields({}) - assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", "filter", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_model_evaluations_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_list_model_evaluations") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_model_evaluations") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ListModelEvaluationsRequest.pb(service.ListModelEvaluationsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = service.ListModelEvaluationsResponse.to_json(service.ListModelEvaluationsResponse()) - - request = service.ListModelEvaluationsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = service.ListModelEvaluationsResponse() - - client.list_model_evaluations(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_model_evaluations_rest_bad_request(transport: str = 'rest', request_type=service.ListModelEvaluationsRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_model_evaluations(request) - - -def test_list_model_evaluations_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListModelEvaluationsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - filter='filter_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListModelEvaluationsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_model_evaluations(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations" % client.transport._host, args[1]) - - -def test_list_model_evaluations_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_model_evaluations( - service.ListModelEvaluationsRequest(), - parent='parent_value', - filter='filter_value', - ) - - -def test_list_model_evaluations_rest_pager(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[], - next_page_token='def', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(service.ListModelEvaluationsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/locations/sample2/models/sample3'} - - pager = client.list_model_evaluations(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in results) - - pages = list(client.list_model_evaluations(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoMlClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide an api_key and a transport instance. - transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = AutoMlClient( - client_options=options, - transport=transport, - ) - - # It is an error to provide an api_key and a credential. - options = mock.Mock() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = AutoMlClient( - client_options=options, - credentials=ga_credentials.AnonymousCredentials() - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoMlClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = AutoMlClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.AutoMlGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.AutoMlGrpcTransport, - transports.AutoMlGrpcAsyncIOTransport, - transports.AutoMlRestTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "rest", -]) -def test_transport_kind(transport_name): - transport = AutoMlClient.get_transport_class(transport_name)( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert transport.kind == transport_name - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.AutoMlGrpcTransport, - ) - -def test_auto_ml_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.AutoMlTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_auto_ml_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.AutoMlTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_dataset', - 'get_dataset', - 'list_datasets', - 'update_dataset', - 'delete_dataset', - 'import_data', - 'export_data', - 'get_annotation_spec', - 'create_model', - 'get_model', - 'list_models', - 'delete_model', - 'update_model', - 'deploy_model', - 'undeploy_model', - 'export_model', - 'get_model_evaluation', - 'list_model_evaluations', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - # Catch all for all remaining methods and properties - remainder = [ - 'kind', - ] - for r in remainder: - with pytest.raises(NotImplementedError): - getattr(transport, r)() - - -def test_auto_ml_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.AutoMlTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_auto_ml_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.automl_v1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.AutoMlTransport() - adc.assert_called_once() - - -def test_auto_ml_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - AutoMlClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.AutoMlGrpcTransport, - transports.AutoMlGrpcAsyncIOTransport, - ], -) -def test_auto_ml_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.AutoMlGrpcTransport, - transports.AutoMlGrpcAsyncIOTransport, - transports.AutoMlRestTransport, - ], -) -def test_auto_ml_transport_auth_gdch_credentials(transport_class): - host = 'https://language.com' - api_audience_tests = [None, 'https://language2.com'] - api_audience_expect = [host, 'https://language2.com'] - for t, e in zip(api_audience_tests, api_audience_expect): - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - gdch_mock = mock.MagicMock() - type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) - adc.return_value = (gdch_mock, None) - transport_class(host=host, api_audience=t) - gdch_mock.with_gdch_audience.assert_called_once_with( - e - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.AutoMlGrpcTransport, grpc_helpers), - (transports.AutoMlGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_auto_ml_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "automl.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="automl.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) -def test_auto_ml_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - -def test_auto_ml_http_transport_client_cert_source_for_mtls(): - cred = ga_credentials.AnonymousCredentials() - with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: - transports.AutoMlRestTransport ( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) - - -def test_auto_ml_rest_lro_client(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_auto_ml_host_no_port(transport_name): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com'), - transport=transport_name, - ) - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://automl.googleapis.com' - ) - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_auto_ml_host_with_port(transport_name): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com:8000'), - transport=transport_name, - ) - assert client.transport._host == ( - 'automl.googleapis.com:8000' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://automl.googleapis.com:8000' - ) - -@pytest.mark.parametrize("transport_name", [ - "rest", -]) -def test_auto_ml_client_transport_session_collision(transport_name): - creds1 = ga_credentials.AnonymousCredentials() - creds2 = ga_credentials.AnonymousCredentials() - client1 = AutoMlClient( - credentials=creds1, - transport=transport_name, - ) - client2 = AutoMlClient( - credentials=creds2, - transport=transport_name, - ) - session1 = client1.transport.create_dataset._session - session2 = client2.transport.create_dataset._session - assert session1 != session2 - session1 = client1.transport.get_dataset._session - session2 = client2.transport.get_dataset._session - assert session1 != session2 - session1 = client1.transport.list_datasets._session - session2 = client2.transport.list_datasets._session - assert session1 != session2 - session1 = client1.transport.update_dataset._session - session2 = client2.transport.update_dataset._session - assert session1 != session2 - session1 = client1.transport.delete_dataset._session - session2 = client2.transport.delete_dataset._session - assert session1 != session2 - session1 = client1.transport.import_data._session - session2 = client2.transport.import_data._session - assert session1 != session2 - 
session1 = client1.transport.export_data._session - session2 = client2.transport.export_data._session - assert session1 != session2 - session1 = client1.transport.get_annotation_spec._session - session2 = client2.transport.get_annotation_spec._session - assert session1 != session2 - session1 = client1.transport.create_model._session - session2 = client2.transport.create_model._session - assert session1 != session2 - session1 = client1.transport.get_model._session - session2 = client2.transport.get_model._session - assert session1 != session2 - session1 = client1.transport.list_models._session - session2 = client2.transport.list_models._session - assert session1 != session2 - session1 = client1.transport.delete_model._session - session2 = client2.transport.delete_model._session - assert session1 != session2 - session1 = client1.transport.update_model._session - session2 = client2.transport.update_model._session - assert session1 != session2 - session1 = client1.transport.deploy_model._session - session2 = client2.transport.deploy_model._session - assert session1 != session2 - session1 = client1.transport.undeploy_model._session - session2 = client2.transport.undeploy_model._session - assert session1 != session2 - session1 = client1.transport.export_model._session - session2 = client2.transport.export_model._session - assert session1 != session2 - session1 = client1.transport.get_model_evaluation._session - session2 = client2.transport.get_model_evaluation._session - assert session1 != session2 - session1 = client1.transport.list_model_evaluations._session - session2 = client2.transport.list_model_evaluations._session - assert session1 != session2 -def test_auto_ml_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.AutoMlGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_auto_ml_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.AutoMlGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) -def test_auto_ml_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) -def test_auto_ml_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_auto_ml_grpc_lro_client(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_auto_ml_grpc_lro_async_client(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_annotation_spec_path(): - project = "squid" - location = "clam" - dataset = "whelk" - annotation_spec = "octopus" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - actual = AutoMlClient.annotation_spec_path(project, location, dataset, annotation_spec) - assert expected == actual - - -def test_parse_annotation_spec_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "dataset": "cuttlefish", - "annotation_spec": "mussel", - } - path = AutoMlClient.annotation_spec_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_annotation_spec_path(path) - assert expected == actual - -def test_dataset_path(): - project = "winkle" - location = "nautilus" - dataset = "scallop" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = AutoMlClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "abalone", - "location": "squid", - "dataset": "clam", - } - path = AutoMlClient.dataset_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_dataset_path(path) - assert expected == actual - -def test_model_path(): - project = "whelk" - location = "octopus" - model = "oyster" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = AutoMlClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "nudibranch", - "location": "cuttlefish", - "model": "mussel", - } - path = AutoMlClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_model_path(path) - assert expected == actual - -def test_model_evaluation_path(): - project = "winkle" - location = "nautilus" - model = "scallop" - model_evaluation = "abalone" - expected = "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(project=project, location=location, model=model, model_evaluation=model_evaluation, ) - actual = AutoMlClient.model_evaluation_path(project, location, model, model_evaluation) - assert expected == actual - - -def test_parse_model_evaluation_path(): - expected = { - "project": "squid", - "location": "clam", - "model": "whelk", - "model_evaluation": "octopus", - } - path = AutoMlClient.model_evaluation_path(**expected) - - # Check that the path construction is reversible. 
- actual = AutoMlClient.parse_model_evaluation_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "oyster" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = AutoMlClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nudibranch", - } - path = AutoMlClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "cuttlefish" - expected = "folders/{folder}".format(folder=folder, ) - actual = AutoMlClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "mussel", - } - path = AutoMlClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "winkle" - expected = "organizations/{organization}".format(organization=organization, ) - actual = AutoMlClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nautilus", - } - path = AutoMlClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "scallop" - expected = "projects/{project}".format(project=project, ) - actual = AutoMlClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "abalone", - } - path = AutoMlClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "squid" - location = "clam" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = AutoMlClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "whelk", - "location": "octopus", - } - path = AutoMlClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = AutoMlClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_with_default_client_info(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.AutoMlTransport, '_prep_wrapped_messages') as prep: - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.AutoMlTransport, '_prep_wrapped_messages') as prep: - transport_class = AutoMlClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - - -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'rest', - 'grpc', - ] - for transport in transports: - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. - with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() - -@pytest.mark.parametrize("client_class,transport_class", [ - (AutoMlClient, transports.AutoMlGrpcTransport), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport), -]) -def test_api_key_credentials(client_class, transport_class): - with mock.patch.object( - google.auth._default, "get_api_key_credentials", create=True - ) as get_api_key_credentials: - mock_cred = mock.Mock() - get_api_key_credentials.return_value = mock_cred - options = client_options.ClientOptions() - options.api_key = "api_key" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=mock_cred, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_prediction_service.py b/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_prediction_service.py deleted file mode 100644 index b17e3639..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/automl_v1/test_prediction_service.py +++ /dev/null @@ -1,2269 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock # pragma: NO COVER -except ImportError: # pragma: NO COVER - import mock - -import grpc -from grpc.experimental import aio -from collections.abc import Iterable -from google.protobuf import json_format -import json -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule -from proto.marshal.rules import wrappers -from requests import Response -from requests import Request, PreparedRequest -from requests.sessions import Session -from google.protobuf import json_format - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.automl_v1.services.prediction_service import PredictionServiceAsyncClient -from google.cloud.automl_v1.services.prediction_service import PredictionServiceClient -from google.cloud.automl_v1.services.prediction_service import transports -from google.cloud.automl_v1.types import annotation_payload -from google.cloud.automl_v1.types import data_items -from google.cloud.automl_v1.types import geometry -from google.cloud.automl_v1.types import io -from google.cloud.automl_v1.types import operations -from google.cloud.automl_v1.types import prediction_service -from google.cloud.automl_v1.types import text_segment -from google.longrunning import operations_pb2 # type: ignore -from google.oauth2 import service_account -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert PredictionServiceClient._get_default_mtls_endpoint(None) is None - assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class,transport_name", [ - (PredictionServiceClient, "grpc"), - (PredictionServiceAsyncClient, "grpc_asyncio"), - (PredictionServiceClient, "rest"), -]) -def test_prediction_service_client_from_service_account_info(client_class, transport_name): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info, transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://automl.googleapis.com' - ) - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.PredictionServiceGrpcTransport, "grpc"), - (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), - (transports.PredictionServiceRestTransport, "rest"), -]) -def test_prediction_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class,transport_name", [ - (PredictionServiceClient, "grpc"), - (PredictionServiceAsyncClient, "grpc_asyncio"), - (PredictionServiceClient, "rest"), -]) -def test_prediction_service_client_from_service_account_file(client_class, transport_name): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert 
isinstance(client, client_class) - - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://automl.googleapis.com' - ) - - -def test_prediction_service_client_get_transport_class(): - transport = PredictionServiceClient.get_transport_class() - available_transports = [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceRestTransport, - ] - assert transport in available_transports - - transport = PredictionServiceClient.get_transport_class("grpc") - assert transport == transports.PredictionServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), - (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -def test_prediction_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class(transport=transport_name) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class(transport=transport_name) - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - # Check the case api_endpoint is provided - options = client_options.ClientOptions(api_audience="https://language.googleapis.com") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience="https://language.googleapis.com" - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), - (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", "true"), - (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", "false"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class", [ - PredictionServiceClient, PredictionServiceAsyncClient -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -def test_prediction_service_client_get_mtls_endpoint_and_cert_source(client_class): - mock_client_cert_source = mock.Mock() - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source == mock_client_cert_source - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - mock_client_cert_source = mock.Mock() - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source == mock_client_cert_source - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), - (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest"), -]) -def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), - (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", None), -]) -def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -def test_prediction_service_client_client_options_from_dict(): - with mock.patch('google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = PredictionServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), -]) -def test_prediction_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # test that the credentials from file are saved and used as the credentials. 
- with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel" - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - file_creds = ga_credentials.AnonymousCredentials() - load_creds.return_value = (file_creds, None) - adc.return_value = (creds, None) - client = client_class(client_options=options, transport=transport_name) - create_channel.assert_called_with( - "automl.googleapis.com:443", - credentials=file_creds, - credentials_file=None, - quota_project_id=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=None, - default_host="automl.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("request_type", [ - prediction_service.PredictRequest, - dict, -]) -def test_predict(request_type, transport: str = 'grpc'): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse( - ) - response = client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - - -def test_predict_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - client.predict() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - -@pytest.mark.asyncio -async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse( - )) - response = await client.predict(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - - -@pytest.mark.asyncio -async def test_predict_async_from_dict(): - await test_predict_async(request_type=dict) - - -def test_predict_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.PredictRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - call.return_value = prediction_service.PredictResponse() - client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_predict_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.PredictRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) - await client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_predict_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.predict( - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].payload - mock_val = data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')) - assert arg == mock_val - arg = args[0].params - mock_val = {'key_value': 'value_value'} - assert arg == mock_val - - -def test_predict_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.predict( - prediction_service.PredictRequest(), - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - -@pytest.mark.asyncio -async def test_predict_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.predict( - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].payload - mock_val = data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')) - assert arg == mock_val - arg = args[0].params - mock_val = {'key_value': 'value_value'} - assert arg == mock_val - -@pytest.mark.asyncio -async def test_predict_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.predict( - prediction_service.PredictRequest(), - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - - -@pytest.mark.parametrize("request_type", [ - prediction_service.BatchPredictRequest, - dict, -]) -def test_batch_predict(request_type, transport: str = 'grpc'): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.batch_predict(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.BatchPredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_batch_predict_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - client.batch_predict() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.BatchPredictRequest() - -@pytest.mark.asyncio -async def test_batch_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.BatchPredictRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.BatchPredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_predict_async_from_dict(): - await test_batch_predict_async(request_type=dict) - - -def test_batch_predict_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.BatchPredictRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_predict_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.BatchPredictRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_batch_predict_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_predict( - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].input_config - mock_val = io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) - assert arg == mock_val - arg = args[0].output_config - mock_val = io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - arg = args[0].params - mock_val = {'key_value': 'value_value'} - assert arg == mock_val - - -def test_batch_predict_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_predict( - prediction_service.BatchPredictRequest(), - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - -@pytest.mark.asyncio -async def test_batch_predict_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.batch_predict( - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].input_config - mock_val = io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) - assert arg == mock_val - arg = args[0].output_config - mock_val = io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - arg = args[0].params - mock_val = {'key_value': 'value_value'} - assert arg == mock_val - -@pytest.mark.asyncio -async def test_batch_predict_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_predict( - prediction_service.BatchPredictRequest(), - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - - -@pytest.mark.parametrize("request_type", [ - prediction_service.PredictRequest, - dict, -]) -def test_predict_rest(request_type): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = prediction_service.PredictResponse( - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = prediction_service.PredictResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.predict(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, prediction_service.PredictResponse) - - -def test_predict_rest_required_fields(request_type=prediction_service.PredictRequest): - transport_class = transports.PredictionServiceRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).predict._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).predict._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = prediction_service.PredictResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = prediction_service.PredictResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.predict(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_predict_rest_unset_required_fields(): - transport = transports.PredictionServiceRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.predict._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "payload", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_predict_rest_interceptors(null_interceptor): - transport = transports.PredictionServiceRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.PredictionServiceRestInterceptor(), - ) - client = PredictionServiceClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.PredictionServiceRestInterceptor, "post_predict") as post, \ - mock.patch.object(transports.PredictionServiceRestInterceptor, "pre_predict") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = prediction_service.PredictRequest.pb(prediction_service.PredictRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = prediction_service.PredictResponse.to_json(prediction_service.PredictResponse()) - - request = prediction_service.PredictRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = prediction_service.PredictResponse() - - client.predict(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_predict_rest_bad_request(transport: str = 'rest', request_type=prediction_service.PredictRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.predict(request) - - -def test_predict_rest_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = prediction_service.PredictResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = prediction_service.PredictResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.predict(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}:predict" % client.transport._host, args[1]) - - -def test_predict_rest_flattened_error(transport: str = 'rest'): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.predict( - prediction_service.PredictRequest(), - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - - -def test_predict_rest_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - prediction_service.BatchPredictRequest, - dict, -]) -def test_batch_predict_rest(request_type): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.batch_predict(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_batch_predict_rest_required_fields(request_type=prediction_service.BatchPredictRequest): - transport_class = transports.PredictionServiceRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).batch_predict._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).batch_predict._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.batch_predict(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_batch_predict_rest_unset_required_fields(): - transport = transports.PredictionServiceRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.batch_predict._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "inputConfig", "outputConfig", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_batch_predict_rest_interceptors(null_interceptor): - transport = transports.PredictionServiceRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.PredictionServiceRestInterceptor(), - ) - client = PredictionServiceClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.PredictionServiceRestInterceptor, "post_batch_predict") as post, \ - mock.patch.object(transports.PredictionServiceRestInterceptor, "pre_batch_predict") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = prediction_service.BatchPredictRequest.pb(prediction_service.BatchPredictRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = prediction_service.BatchPredictRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.batch_predict(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_batch_predict_rest_bad_request(transport: str = 'rest', request_type=prediction_service.BatchPredictRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.batch_predict(request) - - -def test_batch_predict_rest_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.batch_predict(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1/{name=projects/*/locations/*/models/*}:batchPredict" % client.transport._host, args[1]) - - -def test_batch_predict_rest_flattened_error(transport: str = 'rest'): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_predict( - prediction_service.BatchPredictRequest(), - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - - -def test_batch_predict_rest_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide an api_key and a transport instance. 
- transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options=options, - transport=transport, - ) - - # It is an error to provide an api_key and a credential. - options = mock.Mock() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options=options, - credentials=ga_credentials.AnonymousCredentials() - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = PredictionServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.PredictionServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - transports.PredictionServiceRestTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "rest", -]) -def test_transport_kind(transport_name): - transport = PredictionServiceClient.get_transport_class(transport_name)( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert transport.kind == transport_name - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.PredictionServiceGrpcTransport, - ) - -def test_prediction_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_prediction_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'predict', - 'batch_predict', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - # Catch all for all remaining methods and properties - remainder = [ - 'kind', - ] - for r in remainder: - with pytest.raises(NotImplementedError): - getattr(transport, r)() - - -def test_prediction_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_prediction_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.automl_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport() - adc.assert_called_once() - - -def test_prediction_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PredictionServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - ], -) -def test_prediction_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - transports.PredictionServiceRestTransport, - ], -) -def test_prediction_service_transport_auth_gdch_credentials(transport_class): - host = 'https://language.com' - api_audience_tests = [None, 'https://language2.com'] - api_audience_expect = [host, 'https://language2.com'] - for t, e in zip(api_audience_tests, api_audience_expect): - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - gdch_mock = mock.MagicMock() - type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) - adc.return_value = (gdch_mock, None) - transport_class(host=host, api_audience=t) - gdch_mock.with_gdch_audience.assert_called_once_with( - e - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.PredictionServiceGrpcTransport, grpc_helpers), - (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "automl.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="automl.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - -def test_prediction_service_http_transport_client_cert_source_for_mtls(): - cred = ga_credentials.AnonymousCredentials() - with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: - transports.PredictionServiceRestTransport ( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) - - -def test_prediction_service_rest_lro_client(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_prediction_service_host_no_port(transport_name): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com'), - transport=transport_name, - ) - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://automl.googleapis.com' - ) - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_prediction_service_host_with_port(transport_name): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com:8000'), - transport=transport_name, - ) - assert client.transport._host == ( - 'automl.googleapis.com:8000' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://automl.googleapis.com:8000' - ) - -@pytest.mark.parametrize("transport_name", [ - "rest", -]) -def test_prediction_service_client_transport_session_collision(transport_name): - creds1 = ga_credentials.AnonymousCredentials() - creds2 = ga_credentials.AnonymousCredentials() - client1 = PredictionServiceClient( - credentials=creds1, - transport=transport_name, - ) - client2 = PredictionServiceClient( - credentials=creds2, - transport=transport_name, - ) - session1 = client1.transport.predict._session - session2 = client2.transport.predict._session - assert session1 != session2 - session1 = client1.transport.batch_predict._session - session2 = client2.transport.batch_predict._session - assert session1 != session2 -def test_prediction_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.PredictionServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_prediction_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PredictionServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_prediction_service_grpc_lro_client(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_prediction_service_grpc_lro_async_client(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_model_path(): - project = "squid" - location = "clam" - model = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = PredictionServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model": "nudibranch", - } - path = PredictionServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_model_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = PredictionServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = PredictionServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = PredictionServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = PredictionServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = PredictionServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = PredictionServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = PredictionServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = PredictionServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = PredictionServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = PredictionServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_with_default_client_info(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = PredictionServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - - -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'rest', - 'grpc', - ] - for transport in transports: - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() - -@pytest.mark.parametrize("client_class,transport_class", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport), -]) -def test_api_key_credentials(client_class, transport_class): - with mock.patch.object( - google.auth._default, "get_api_key_credentials", create=True - ) as get_api_key_credentials: - mock_cred = mock.Mock() - get_api_key_credentials.return_value = mock_cred - options = client_options.ClientOptions() - options.api_key = "api_key" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=mock_cred, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) diff --git a/owl-bot-staging/v1beta1/.coveragerc b/owl-bot-staging/v1beta1/.coveragerc deleted file mode 100644 index 8705cefd..00000000 --- a/owl-bot-staging/v1beta1/.coveragerc +++ /dev/null @@ -1,13 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/automl/__init__.py - google/cloud/automl/gapic_version.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ diff --git a/owl-bot-staging/v1beta1/.flake8 b/owl-bot-staging/v1beta1/.flake8 deleted file mode 100644 index 29227d4c..00000000 --- a/owl-bot-staging/v1beta1/.flake8 +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by synthtool. DO NOT EDIT! -[flake8] -ignore = E203, E266, E501, W503 -exclude = - # Exclude generated code. - **/proto/** - **/gapic/** - **/services/** - **/types/** - *_pb2.py - - # Standard linting exemptions. - **/.nox/** - __pycache__, - .git, - *.pyc, - conf.py diff --git a/owl-bot-staging/v1beta1/MANIFEST.in b/owl-bot-staging/v1beta1/MANIFEST.in deleted file mode 100644 index ba187221..00000000 --- a/owl-bot-staging/v1beta1/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/automl *.py -recursive-include google/cloud/automl_v1beta1 *.py diff --git a/owl-bot-staging/v1beta1/README.rst b/owl-bot-staging/v1beta1/README.rst deleted file mode 100644 index d0dde648..00000000 --- a/owl-bot-staging/v1beta1/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Automl API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Automl API. -4. 
`Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv - source /bin/activate - /bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv - \Scripts\activate - \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1beta1/docs/_static/custom.css b/owl-bot-staging/v1beta1/docs/_static/custom.css deleted file mode 100644 index 06423be0..00000000 --- a/owl-bot-staging/v1beta1/docs/_static/custom.css +++ /dev/null @@ -1,3 +0,0 @@ -dl.field-list > dt { - min-width: 100px -} diff --git a/owl-bot-staging/v1beta1/docs/automl_v1beta1/auto_ml.rst b/owl-bot-staging/v1beta1/docs/automl_v1beta1/auto_ml.rst deleted file mode 100644 index ddb02f63..00000000 --- a/owl-bot-staging/v1beta1/docs/automl_v1beta1/auto_ml.rst +++ /dev/null @@ -1,10 +0,0 @@ -AutoMl ------------------------- - -.. automodule:: google.cloud.automl_v1beta1.services.auto_ml - :members: - :inherited-members: - -.. automodule:: google.cloud.automl_v1beta1.services.auto_ml.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/automl_v1beta1/prediction_service.rst b/owl-bot-staging/v1beta1/docs/automl_v1beta1/prediction_service.rst deleted file mode 100644 index e234e69f..00000000 --- a/owl-bot-staging/v1beta1/docs/automl_v1beta1/prediction_service.rst +++ /dev/null @@ -1,6 +0,0 @@ -PredictionService ------------------------------------ - -.. automodule:: google.cloud.automl_v1beta1.services.prediction_service - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/automl_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/automl_v1beta1/services.rst deleted file mode 100644 index ebd9c7c8..00000000 --- a/owl-bot-staging/v1beta1/docs/automl_v1beta1/services.rst +++ /dev/null @@ -1,7 +0,0 @@ -Services for Google Cloud Automl v1beta1 API -============================================ -.. toctree:: - :maxdepth: 2 - - auto_ml - prediction_service diff --git a/owl-bot-staging/v1beta1/docs/automl_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/automl_v1beta1/types.rst deleted file mode 100644 index b50b55f6..00000000 --- a/owl-bot-staging/v1beta1/docs/automl_v1beta1/types.rst +++ /dev/null @@ -1,6 +0,0 @@ -Types for Google Cloud Automl v1beta1 API -========================================= - -.. 
automodule:: google.cloud.automl_v1beta1.types - :members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/conf.py b/owl-bot-staging/v1beta1/docs/conf.py deleted file mode 100644 index 708bcaa7..00000000 --- a/owl-bot-staging/v1beta1/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-automl documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "4.0.1" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The root toctree document. -root_doc = "index" - -# General information about the project. -project = u"google-cloud-automl" -copyright = u"2023, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. 
-# Usually you set "language" from the command line for these cases. -language = 'en' - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. 
-# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-automl-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - root_doc, - "google-cloud-automl.tex", - u"google-cloud-automl Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. 
-# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - root_doc, - "google-cloud-automl", - u"Google Cloud Automl Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - root_doc, - "google-cloud-automl", - u"google-cloud-automl Documentation", - author, - "google-cloud-automl", - "GAPIC library for Google Cloud Automl API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/docs/index.rst b/owl-bot-staging/v1beta1/docs/index.rst deleted file mode 100644 index f7d269b1..00000000 --- a/owl-bot-staging/v1beta1/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - automl_v1beta1/services - automl_v1beta1/types diff --git a/owl-bot-staging/v1beta1/google/cloud/automl/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl/__init__.py deleted file mode 100644 index 98db5cfc..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl/__init__.py +++ /dev/null @@ -1,275 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from google.cloud.automl import gapic_version as package_version - -__version__ = package_version.__version__ - - -from google.cloud.automl_v1beta1.services.auto_ml.client import AutoMlClient -from google.cloud.automl_v1beta1.services.auto_ml.async_client import AutoMlAsyncClient -from google.cloud.automl_v1beta1.services.prediction_service.client import PredictionServiceClient -from google.cloud.automl_v1beta1.services.prediction_service.async_client import PredictionServiceAsyncClient - -from google.cloud.automl_v1beta1.types.annotation_payload import AnnotationPayload -from google.cloud.automl_v1beta1.types.annotation_spec import AnnotationSpec -from google.cloud.automl_v1beta1.types.classification import ClassificationAnnotation -from google.cloud.automl_v1beta1.types.classification import ClassificationEvaluationMetrics -from google.cloud.automl_v1beta1.types.classification import VideoClassificationAnnotation -from google.cloud.automl_v1beta1.types.classification import ClassificationType -from google.cloud.automl_v1beta1.types.column_spec import ColumnSpec -from google.cloud.automl_v1beta1.types.data_items import Document -from google.cloud.automl_v1beta1.types.data_items import DocumentDimensions -from google.cloud.automl_v1beta1.types.data_items import ExamplePayload -from google.cloud.automl_v1beta1.types.data_items import Image -from google.cloud.automl_v1beta1.types.data_items import Row -from google.cloud.automl_v1beta1.types.data_items import TextSnippet -from google.cloud.automl_v1beta1.types.data_stats import ArrayStats -from google.cloud.automl_v1beta1.types.data_stats import CategoryStats -from google.cloud.automl_v1beta1.types.data_stats import CorrelationStats -from google.cloud.automl_v1beta1.types.data_stats import DataStats -from google.cloud.automl_v1beta1.types.data_stats import Float64Stats -from google.cloud.automl_v1beta1.types.data_stats import StringStats -from google.cloud.automl_v1beta1.types.data_stats import StructStats -from google.cloud.automl_v1beta1.types.data_stats import TimestampStats -from google.cloud.automl_v1beta1.types.data_types import DataType -from google.cloud.automl_v1beta1.types.data_types import StructType -from google.cloud.automl_v1beta1.types.data_types import TypeCode -from google.cloud.automl_v1beta1.types.dataset import Dataset -from google.cloud.automl_v1beta1.types.detection import BoundingBoxMetricsEntry -from google.cloud.automl_v1beta1.types.detection import ImageObjectDetectionAnnotation -from google.cloud.automl_v1beta1.types.detection import ImageObjectDetectionEvaluationMetrics -from google.cloud.automl_v1beta1.types.detection import VideoObjectTrackingAnnotation -from google.cloud.automl_v1beta1.types.detection import VideoObjectTrackingEvaluationMetrics -from google.cloud.automl_v1beta1.types.geometry import BoundingPoly -from google.cloud.automl_v1beta1.types.geometry import NormalizedVertex -from google.cloud.automl_v1beta1.types.image import ImageClassificationDatasetMetadata -from google.cloud.automl_v1beta1.types.image import ImageClassificationModelDeploymentMetadata -from google.cloud.automl_v1beta1.types.image import ImageClassificationModelMetadata -from google.cloud.automl_v1beta1.types.image import ImageObjectDetectionDatasetMetadata -from google.cloud.automl_v1beta1.types.image import ImageObjectDetectionModelDeploymentMetadata -from google.cloud.automl_v1beta1.types.image import 
ImageObjectDetectionModelMetadata -from google.cloud.automl_v1beta1.types.io import BatchPredictInputConfig -from google.cloud.automl_v1beta1.types.io import BatchPredictOutputConfig -from google.cloud.automl_v1beta1.types.io import BigQueryDestination -from google.cloud.automl_v1beta1.types.io import BigQuerySource -from google.cloud.automl_v1beta1.types.io import DocumentInputConfig -from google.cloud.automl_v1beta1.types.io import ExportEvaluatedExamplesOutputConfig -from google.cloud.automl_v1beta1.types.io import GcrDestination -from google.cloud.automl_v1beta1.types.io import GcsDestination -from google.cloud.automl_v1beta1.types.io import GcsSource -from google.cloud.automl_v1beta1.types.io import InputConfig -from google.cloud.automl_v1beta1.types.io import ModelExportOutputConfig -from google.cloud.automl_v1beta1.types.io import OutputConfig -from google.cloud.automl_v1beta1.types.model import Model -from google.cloud.automl_v1beta1.types.model_evaluation import ModelEvaluation -from google.cloud.automl_v1beta1.types.operations import BatchPredictOperationMetadata -from google.cloud.automl_v1beta1.types.operations import CreateModelOperationMetadata -from google.cloud.automl_v1beta1.types.operations import DeleteOperationMetadata -from google.cloud.automl_v1beta1.types.operations import DeployModelOperationMetadata -from google.cloud.automl_v1beta1.types.operations import ExportDataOperationMetadata -from google.cloud.automl_v1beta1.types.operations import ExportEvaluatedExamplesOperationMetadata -from google.cloud.automl_v1beta1.types.operations import ExportModelOperationMetadata -from google.cloud.automl_v1beta1.types.operations import ImportDataOperationMetadata -from google.cloud.automl_v1beta1.types.operations import OperationMetadata -from google.cloud.automl_v1beta1.types.operations import UndeployModelOperationMetadata -from google.cloud.automl_v1beta1.types.prediction_service import BatchPredictRequest -from google.cloud.automl_v1beta1.types.prediction_service import BatchPredictResult -from google.cloud.automl_v1beta1.types.prediction_service import PredictRequest -from google.cloud.automl_v1beta1.types.prediction_service import PredictResponse -from google.cloud.automl_v1beta1.types.ranges import DoubleRange -from google.cloud.automl_v1beta1.types.regression import RegressionEvaluationMetrics -from google.cloud.automl_v1beta1.types.service import CreateDatasetRequest -from google.cloud.automl_v1beta1.types.service import CreateModelRequest -from google.cloud.automl_v1beta1.types.service import DeleteDatasetRequest -from google.cloud.automl_v1beta1.types.service import DeleteModelRequest -from google.cloud.automl_v1beta1.types.service import DeployModelRequest -from google.cloud.automl_v1beta1.types.service import ExportDataRequest -from google.cloud.automl_v1beta1.types.service import ExportEvaluatedExamplesRequest -from google.cloud.automl_v1beta1.types.service import ExportModelRequest -from google.cloud.automl_v1beta1.types.service import GetAnnotationSpecRequest -from google.cloud.automl_v1beta1.types.service import GetColumnSpecRequest -from google.cloud.automl_v1beta1.types.service import GetDatasetRequest -from google.cloud.automl_v1beta1.types.service import GetModelEvaluationRequest -from google.cloud.automl_v1beta1.types.service import GetModelRequest -from google.cloud.automl_v1beta1.types.service import GetTableSpecRequest -from google.cloud.automl_v1beta1.types.service import ImportDataRequest -from google.cloud.automl_v1beta1.types.service import 
ListColumnSpecsRequest -from google.cloud.automl_v1beta1.types.service import ListColumnSpecsResponse -from google.cloud.automl_v1beta1.types.service import ListDatasetsRequest -from google.cloud.automl_v1beta1.types.service import ListDatasetsResponse -from google.cloud.automl_v1beta1.types.service import ListModelEvaluationsRequest -from google.cloud.automl_v1beta1.types.service import ListModelEvaluationsResponse -from google.cloud.automl_v1beta1.types.service import ListModelsRequest -from google.cloud.automl_v1beta1.types.service import ListModelsResponse -from google.cloud.automl_v1beta1.types.service import ListTableSpecsRequest -from google.cloud.automl_v1beta1.types.service import ListTableSpecsResponse -from google.cloud.automl_v1beta1.types.service import UndeployModelRequest -from google.cloud.automl_v1beta1.types.service import UpdateColumnSpecRequest -from google.cloud.automl_v1beta1.types.service import UpdateDatasetRequest -from google.cloud.automl_v1beta1.types.service import UpdateTableSpecRequest -from google.cloud.automl_v1beta1.types.table_spec import TableSpec -from google.cloud.automl_v1beta1.types.tables import TablesAnnotation -from google.cloud.automl_v1beta1.types.tables import TablesDatasetMetadata -from google.cloud.automl_v1beta1.types.tables import TablesModelColumnInfo -from google.cloud.automl_v1beta1.types.tables import TablesModelMetadata -from google.cloud.automl_v1beta1.types.temporal import TimeSegment -from google.cloud.automl_v1beta1.types.text import TextClassificationDatasetMetadata -from google.cloud.automl_v1beta1.types.text import TextClassificationModelMetadata -from google.cloud.automl_v1beta1.types.text import TextExtractionDatasetMetadata -from google.cloud.automl_v1beta1.types.text import TextExtractionModelMetadata -from google.cloud.automl_v1beta1.types.text import TextSentimentDatasetMetadata -from google.cloud.automl_v1beta1.types.text import TextSentimentModelMetadata -from google.cloud.automl_v1beta1.types.text_extraction import TextExtractionAnnotation -from google.cloud.automl_v1beta1.types.text_extraction import TextExtractionEvaluationMetrics -from google.cloud.automl_v1beta1.types.text_segment import TextSegment -from google.cloud.automl_v1beta1.types.text_sentiment import TextSentimentAnnotation -from google.cloud.automl_v1beta1.types.text_sentiment import TextSentimentEvaluationMetrics -from google.cloud.automl_v1beta1.types.translation import TranslationAnnotation -from google.cloud.automl_v1beta1.types.translation import TranslationDatasetMetadata -from google.cloud.automl_v1beta1.types.translation import TranslationEvaluationMetrics -from google.cloud.automl_v1beta1.types.translation import TranslationModelMetadata -from google.cloud.automl_v1beta1.types.video import VideoClassificationDatasetMetadata -from google.cloud.automl_v1beta1.types.video import VideoClassificationModelMetadata -from google.cloud.automl_v1beta1.types.video import VideoObjectTrackingDatasetMetadata -from google.cloud.automl_v1beta1.types.video import VideoObjectTrackingModelMetadata - -__all__ = ('AutoMlClient', - 'AutoMlAsyncClient', - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', - 'AnnotationPayload', - 'AnnotationSpec', - 'ClassificationAnnotation', - 'ClassificationEvaluationMetrics', - 'VideoClassificationAnnotation', - 'ClassificationType', - 'ColumnSpec', - 'Document', - 'DocumentDimensions', - 'ExamplePayload', - 'Image', - 'Row', - 'TextSnippet', - 'ArrayStats', - 'CategoryStats', - 'CorrelationStats', - 'DataStats', - 
'Float64Stats', - 'StringStats', - 'StructStats', - 'TimestampStats', - 'DataType', - 'StructType', - 'TypeCode', - 'Dataset', - 'BoundingBoxMetricsEntry', - 'ImageObjectDetectionAnnotation', - 'ImageObjectDetectionEvaluationMetrics', - 'VideoObjectTrackingAnnotation', - 'VideoObjectTrackingEvaluationMetrics', - 'BoundingPoly', - 'NormalizedVertex', - 'ImageClassificationDatasetMetadata', - 'ImageClassificationModelDeploymentMetadata', - 'ImageClassificationModelMetadata', - 'ImageObjectDetectionDatasetMetadata', - 'ImageObjectDetectionModelDeploymentMetadata', - 'ImageObjectDetectionModelMetadata', - 'BatchPredictInputConfig', - 'BatchPredictOutputConfig', - 'BigQueryDestination', - 'BigQuerySource', - 'DocumentInputConfig', - 'ExportEvaluatedExamplesOutputConfig', - 'GcrDestination', - 'GcsDestination', - 'GcsSource', - 'InputConfig', - 'ModelExportOutputConfig', - 'OutputConfig', - 'Model', - 'ModelEvaluation', - 'BatchPredictOperationMetadata', - 'CreateModelOperationMetadata', - 'DeleteOperationMetadata', - 'DeployModelOperationMetadata', - 'ExportDataOperationMetadata', - 'ExportEvaluatedExamplesOperationMetadata', - 'ExportModelOperationMetadata', - 'ImportDataOperationMetadata', - 'OperationMetadata', - 'UndeployModelOperationMetadata', - 'BatchPredictRequest', - 'BatchPredictResult', - 'PredictRequest', - 'PredictResponse', - 'DoubleRange', - 'RegressionEvaluationMetrics', - 'CreateDatasetRequest', - 'CreateModelRequest', - 'DeleteDatasetRequest', - 'DeleteModelRequest', - 'DeployModelRequest', - 'ExportDataRequest', - 'ExportEvaluatedExamplesRequest', - 'ExportModelRequest', - 'GetAnnotationSpecRequest', - 'GetColumnSpecRequest', - 'GetDatasetRequest', - 'GetModelEvaluationRequest', - 'GetModelRequest', - 'GetTableSpecRequest', - 'ImportDataRequest', - 'ListColumnSpecsRequest', - 'ListColumnSpecsResponse', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'ListTableSpecsRequest', - 'ListTableSpecsResponse', - 'UndeployModelRequest', - 'UpdateColumnSpecRequest', - 'UpdateDatasetRequest', - 'UpdateTableSpecRequest', - 'TableSpec', - 'TablesAnnotation', - 'TablesDatasetMetadata', - 'TablesModelColumnInfo', - 'TablesModelMetadata', - 'TimeSegment', - 'TextClassificationDatasetMetadata', - 'TextClassificationModelMetadata', - 'TextExtractionDatasetMetadata', - 'TextExtractionModelMetadata', - 'TextSentimentDatasetMetadata', - 'TextSentimentModelMetadata', - 'TextExtractionAnnotation', - 'TextExtractionEvaluationMetrics', - 'TextSegment', - 'TextSentimentAnnotation', - 'TextSentimentEvaluationMetrics', - 'TranslationAnnotation', - 'TranslationDatasetMetadata', - 'TranslationEvaluationMetrics', - 'TranslationModelMetadata', - 'VideoClassificationDatasetMetadata', - 'VideoClassificationModelMetadata', - 'VideoObjectTrackingDatasetMetadata', - 'VideoObjectTrackingModelMetadata', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl/gapic_version.py b/owl-bot-staging/v1beta1/google/cloud/automl/gapic_version.py deleted file mode 100644 index 360a0d13..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl/gapic_version.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
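# Illustrative sketch, not part of the generated package above: as the re-exports
# and __all__ listing show, the versionless ``google.cloud.automl`` package in this
# v1beta1 staging tree simply aliases the ``google.cloud.automl_v1beta1`` clients
# and types, so both import spellings resolve to the same classes.
#
#     from google.cloud import automl
#     from google.cloud import automl_v1beta1
#
#     assert automl.AutoMlClient is automl_v1beta1.AutoMlClient
#     assert automl.PredictionServiceClient is automl_v1beta1.PredictionServiceClient
#     assert automl.Dataset is automl_v1beta1.Dataset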
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/v1beta1/google/cloud/automl/py.typed b/owl-bot-staging/v1beta1/google/cloud/automl/py.typed deleted file mode 100644 index 0560ba18..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-automl package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/__init__.py deleted file mode 100644 index 16ca8585..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/__init__.py +++ /dev/null @@ -1,276 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from google.cloud.automl_v1beta1 import gapic_version as package_version - -__version__ = package_version.__version__ - - -from .services.auto_ml import AutoMlClient -from .services.auto_ml import AutoMlAsyncClient -from .services.prediction_service import PredictionServiceClient -from .services.prediction_service import PredictionServiceAsyncClient - -from .types.annotation_payload import AnnotationPayload -from .types.annotation_spec import AnnotationSpec -from .types.classification import ClassificationAnnotation -from .types.classification import ClassificationEvaluationMetrics -from .types.classification import VideoClassificationAnnotation -from .types.classification import ClassificationType -from .types.column_spec import ColumnSpec -from .types.data_items import Document -from .types.data_items import DocumentDimensions -from .types.data_items import ExamplePayload -from .types.data_items import Image -from .types.data_items import Row -from .types.data_items import TextSnippet -from .types.data_stats import ArrayStats -from .types.data_stats import CategoryStats -from .types.data_stats import CorrelationStats -from .types.data_stats import DataStats -from .types.data_stats import Float64Stats -from .types.data_stats import StringStats -from .types.data_stats import StructStats -from .types.data_stats import TimestampStats -from .types.data_types import DataType -from .types.data_types import StructType -from .types.data_types import TypeCode -from .types.dataset import Dataset -from .types.detection import BoundingBoxMetricsEntry -from .types.detection import ImageObjectDetectionAnnotation -from .types.detection import ImageObjectDetectionEvaluationMetrics -from .types.detection import VideoObjectTrackingAnnotation -from .types.detection import VideoObjectTrackingEvaluationMetrics -from 
.types.geometry import BoundingPoly -from .types.geometry import NormalizedVertex -from .types.image import ImageClassificationDatasetMetadata -from .types.image import ImageClassificationModelDeploymentMetadata -from .types.image import ImageClassificationModelMetadata -from .types.image import ImageObjectDetectionDatasetMetadata -from .types.image import ImageObjectDetectionModelDeploymentMetadata -from .types.image import ImageObjectDetectionModelMetadata -from .types.io import BatchPredictInputConfig -from .types.io import BatchPredictOutputConfig -from .types.io import BigQueryDestination -from .types.io import BigQuerySource -from .types.io import DocumentInputConfig -from .types.io import ExportEvaluatedExamplesOutputConfig -from .types.io import GcrDestination -from .types.io import GcsDestination -from .types.io import GcsSource -from .types.io import InputConfig -from .types.io import ModelExportOutputConfig -from .types.io import OutputConfig -from .types.model import Model -from .types.model_evaluation import ModelEvaluation -from .types.operations import BatchPredictOperationMetadata -from .types.operations import CreateModelOperationMetadata -from .types.operations import DeleteOperationMetadata -from .types.operations import DeployModelOperationMetadata -from .types.operations import ExportDataOperationMetadata -from .types.operations import ExportEvaluatedExamplesOperationMetadata -from .types.operations import ExportModelOperationMetadata -from .types.operations import ImportDataOperationMetadata -from .types.operations import OperationMetadata -from .types.operations import UndeployModelOperationMetadata -from .types.prediction_service import BatchPredictRequest -from .types.prediction_service import BatchPredictResult -from .types.prediction_service import PredictRequest -from .types.prediction_service import PredictResponse -from .types.ranges import DoubleRange -from .types.regression import RegressionEvaluationMetrics -from .types.service import CreateDatasetRequest -from .types.service import CreateModelRequest -from .types.service import DeleteDatasetRequest -from .types.service import DeleteModelRequest -from .types.service import DeployModelRequest -from .types.service import ExportDataRequest -from .types.service import ExportEvaluatedExamplesRequest -from .types.service import ExportModelRequest -from .types.service import GetAnnotationSpecRequest -from .types.service import GetColumnSpecRequest -from .types.service import GetDatasetRequest -from .types.service import GetModelEvaluationRequest -from .types.service import GetModelRequest -from .types.service import GetTableSpecRequest -from .types.service import ImportDataRequest -from .types.service import ListColumnSpecsRequest -from .types.service import ListColumnSpecsResponse -from .types.service import ListDatasetsRequest -from .types.service import ListDatasetsResponse -from .types.service import ListModelEvaluationsRequest -from .types.service import ListModelEvaluationsResponse -from .types.service import ListModelsRequest -from .types.service import ListModelsResponse -from .types.service import ListTableSpecsRequest -from .types.service import ListTableSpecsResponse -from .types.service import UndeployModelRequest -from .types.service import UpdateColumnSpecRequest -from .types.service import UpdateDatasetRequest -from .types.service import UpdateTableSpecRequest -from .types.table_spec import TableSpec -from .types.tables import TablesAnnotation -from .types.tables import TablesDatasetMetadata -from 
.types.tables import TablesModelColumnInfo -from .types.tables import TablesModelMetadata -from .types.temporal import TimeSegment -from .types.text import TextClassificationDatasetMetadata -from .types.text import TextClassificationModelMetadata -from .types.text import TextExtractionDatasetMetadata -from .types.text import TextExtractionModelMetadata -from .types.text import TextSentimentDatasetMetadata -from .types.text import TextSentimentModelMetadata -from .types.text_extraction import TextExtractionAnnotation -from .types.text_extraction import TextExtractionEvaluationMetrics -from .types.text_segment import TextSegment -from .types.text_sentiment import TextSentimentAnnotation -from .types.text_sentiment import TextSentimentEvaluationMetrics -from .types.translation import TranslationAnnotation -from .types.translation import TranslationDatasetMetadata -from .types.translation import TranslationEvaluationMetrics -from .types.translation import TranslationModelMetadata -from .types.video import VideoClassificationDatasetMetadata -from .types.video import VideoClassificationModelMetadata -from .types.video import VideoObjectTrackingDatasetMetadata -from .types.video import VideoObjectTrackingModelMetadata - -__all__ = ( - 'AutoMlAsyncClient', - 'PredictionServiceAsyncClient', -'AnnotationPayload', -'AnnotationSpec', -'ArrayStats', -'AutoMlClient', -'BatchPredictInputConfig', -'BatchPredictOperationMetadata', -'BatchPredictOutputConfig', -'BatchPredictRequest', -'BatchPredictResult', -'BigQueryDestination', -'BigQuerySource', -'BoundingBoxMetricsEntry', -'BoundingPoly', -'CategoryStats', -'ClassificationAnnotation', -'ClassificationEvaluationMetrics', -'ClassificationType', -'ColumnSpec', -'CorrelationStats', -'CreateDatasetRequest', -'CreateModelOperationMetadata', -'CreateModelRequest', -'DataStats', -'DataType', -'Dataset', -'DeleteDatasetRequest', -'DeleteModelRequest', -'DeleteOperationMetadata', -'DeployModelOperationMetadata', -'DeployModelRequest', -'Document', -'DocumentDimensions', -'DocumentInputConfig', -'DoubleRange', -'ExamplePayload', -'ExportDataOperationMetadata', -'ExportDataRequest', -'ExportEvaluatedExamplesOperationMetadata', -'ExportEvaluatedExamplesOutputConfig', -'ExportEvaluatedExamplesRequest', -'ExportModelOperationMetadata', -'ExportModelRequest', -'Float64Stats', -'GcrDestination', -'GcsDestination', -'GcsSource', -'GetAnnotationSpecRequest', -'GetColumnSpecRequest', -'GetDatasetRequest', -'GetModelEvaluationRequest', -'GetModelRequest', -'GetTableSpecRequest', -'Image', -'ImageClassificationDatasetMetadata', -'ImageClassificationModelDeploymentMetadata', -'ImageClassificationModelMetadata', -'ImageObjectDetectionAnnotation', -'ImageObjectDetectionDatasetMetadata', -'ImageObjectDetectionEvaluationMetrics', -'ImageObjectDetectionModelDeploymentMetadata', -'ImageObjectDetectionModelMetadata', -'ImportDataOperationMetadata', -'ImportDataRequest', -'InputConfig', -'ListColumnSpecsRequest', -'ListColumnSpecsResponse', -'ListDatasetsRequest', -'ListDatasetsResponse', -'ListModelEvaluationsRequest', -'ListModelEvaluationsResponse', -'ListModelsRequest', -'ListModelsResponse', -'ListTableSpecsRequest', -'ListTableSpecsResponse', -'Model', -'ModelEvaluation', -'ModelExportOutputConfig', -'NormalizedVertex', -'OperationMetadata', -'OutputConfig', -'PredictRequest', -'PredictResponse', -'PredictionServiceClient', -'RegressionEvaluationMetrics', -'Row', -'StringStats', -'StructStats', -'StructType', -'TableSpec', -'TablesAnnotation', -'TablesDatasetMetadata', 
-'TablesModelColumnInfo', -'TablesModelMetadata', -'TextClassificationDatasetMetadata', -'TextClassificationModelMetadata', -'TextExtractionAnnotation', -'TextExtractionDatasetMetadata', -'TextExtractionEvaluationMetrics', -'TextExtractionModelMetadata', -'TextSegment', -'TextSentimentAnnotation', -'TextSentimentDatasetMetadata', -'TextSentimentEvaluationMetrics', -'TextSentimentModelMetadata', -'TextSnippet', -'TimeSegment', -'TimestampStats', -'TranslationAnnotation', -'TranslationDatasetMetadata', -'TranslationEvaluationMetrics', -'TranslationModelMetadata', -'TypeCode', -'UndeployModelOperationMetadata', -'UndeployModelRequest', -'UpdateColumnSpecRequest', -'UpdateDatasetRequest', -'UpdateTableSpecRequest', -'VideoClassificationAnnotation', -'VideoClassificationDatasetMetadata', -'VideoClassificationModelMetadata', -'VideoObjectTrackingAnnotation', -'VideoObjectTrackingDatasetMetadata', -'VideoObjectTrackingEvaluationMetrics', -'VideoObjectTrackingModelMetadata', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_metadata.json deleted file mode 100644 index 74e85289..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,437 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.automl_v1beta1", - "protoPackage": "google.cloud.automl.v1beta1", - "schema": "1.0", - "services": { - "AutoMl": { - "clients": { - "grpc": { - "libraryClient": "AutoMlClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "CreateModel": { - "methods": [ - "create_model" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "ExportEvaluatedExamples": { - "methods": [ - "export_evaluated_examples" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetColumnSpec": { - "methods": [ - "get_column_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetTableSpec": { - "methods": [ - "get_table_spec" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListColumnSpecs": { - "methods": [ - "list_column_specs" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "ListTableSpecs": { - "methods": [ - "list_table_specs" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateColumnSpec": { - "methods": [ - "update_column_spec" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - }, - "UpdateTableSpec": { - "methods": [ - "update_table_spec" - ] - } - } - }, - "grpc-async": { - "libraryClient": "AutoMlAsyncClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "CreateModel": { - "methods": [ - "create_model" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "DeployModel": 
{ - "methods": [ - "deploy_model" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "ExportEvaluatedExamples": { - "methods": [ - "export_evaluated_examples" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetColumnSpec": { - "methods": [ - "get_column_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetTableSpec": { - "methods": [ - "get_table_spec" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListColumnSpecs": { - "methods": [ - "list_column_specs" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "ListTableSpecs": { - "methods": [ - "list_table_specs" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateColumnSpec": { - "methods": [ - "update_column_spec" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - }, - "UpdateTableSpec": { - "methods": [ - "update_table_spec" - ] - } - } - }, - "rest": { - "libraryClient": "AutoMlClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "CreateModel": { - "methods": [ - "create_model" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "ExportEvaluatedExamples": { - "methods": [ - "export_evaluated_examples" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetColumnSpec": { - "methods": [ - "get_column_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetTableSpec": { - "methods": [ - "get_table_spec" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListColumnSpecs": { - "methods": [ - "list_column_specs" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "ListTableSpecs": { - "methods": [ - "list_table_specs" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateColumnSpec": { - "methods": [ - "update_column_spec" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - }, - "UpdateTableSpec": { - "methods": [ - "update_table_spec" - ] - } - } - } - } - }, - "PredictionService": { - "clients": { - "grpc": { - "libraryClient": "PredictionServiceClient", - "rpcs": { - "BatchPredict": { - "methods": [ - "batch_predict" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - } - } - }, - "grpc-async": { - "libraryClient": "PredictionServiceAsyncClient", - "rpcs": { - "BatchPredict": { - "methods": [ - "batch_predict" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - } - } - }, - "rest": { - "libraryClient": "PredictionServiceClient", - "rpcs": { - "BatchPredict": { - "methods": [ - "batch_predict" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - } - 
} - } - } - } - } -} diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_version.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_version.py deleted file mode 100644 index 360a0d13..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/gapic_version.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/py.typed deleted file mode 100644 index 0560ba18..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-automl package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/__init__.py deleted file mode 100644 index 89a37dc9..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/__init__.py deleted file mode 100644 index 8f53357e..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
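# Illustrative sketch, not generated code: gapic_metadata.json (shown above) maps
# proto services and their RPCs to the corresponding client methods for each
# transport. Reading it with the structure shown there (the relative file path is
# an assumption about running from the staged package root):
#
#     import json
#
#     with open("google/cloud/automl_v1beta1/gapic_metadata.json") as f:
#         metadata = json.load(f)
#
#     grpc_rpcs = metadata["services"]["AutoMl"]["clients"]["grpc"]["rpcs"]
#     print(grpc_rpcs["CreateDataset"]["methods"])  # ['create_dataset']
#
#     rest_rpcs = metadata["services"]["PredictionService"]["clients"]["rest"]["rpcs"]
#     print(rest_rpcs["Predict"]["methods"])  # ['predict']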
-# -from .client import AutoMlClient -from .async_client import AutoMlAsyncClient - -__all__ = ( - 'AutoMlClient', - 'AutoMlAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/async_client.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/async_client.py deleted file mode 100644 index b905c3da..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/async_client.py +++ /dev/null @@ -1,3170 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union - -from google.cloud.automl_v1beta1 import gapic_version as package_version - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.automl_v1beta1.services.auto_ml import pagers -from google.cloud.automl_v1beta1.types import annotation_spec -from google.cloud.automl_v1beta1.types import classification -from google.cloud.automl_v1beta1.types import column_spec -from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec -from google.cloud.automl_v1beta1.types import data_stats -from google.cloud.automl_v1beta1.types import data_types -from google.cloud.automl_v1beta1.types import dataset -from google.cloud.automl_v1beta1.types import dataset as gca_dataset -from google.cloud.automl_v1beta1.types import detection -from google.cloud.automl_v1beta1.types import image -from google.cloud.automl_v1beta1.types import io -from google.cloud.automl_v1beta1.types import model -from google.cloud.automl_v1beta1.types import model as gca_model -from google.cloud.automl_v1beta1.types import model_evaluation -from google.cloud.automl_v1beta1.types import operations -from google.cloud.automl_v1beta1.types import regression -from google.cloud.automl_v1beta1.types import service -from google.cloud.automl_v1beta1.types import table_spec -from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec -from google.cloud.automl_v1beta1.types import tables -from google.cloud.automl_v1beta1.types import text -from google.cloud.automl_v1beta1.types import text_extraction -from google.cloud.automl_v1beta1.types import text_sentiment -from google.cloud.automl_v1beta1.types import translation -from google.cloud.automl_v1beta1.types import video 
-from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport -from .client import AutoMlClient - - -class AutoMlAsyncClient: - """AutoML Server API. - - The resource names are assigned by the server. The server never - reuses names that it has created after the resources with those - names are deleted. - - An ID of a resource is the last element of the item's resource name. - For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - _client: AutoMlClient - - DEFAULT_ENDPOINT = AutoMlClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = AutoMlClient.DEFAULT_MTLS_ENDPOINT - - annotation_spec_path = staticmethod(AutoMlClient.annotation_spec_path) - parse_annotation_spec_path = staticmethod(AutoMlClient.parse_annotation_spec_path) - column_spec_path = staticmethod(AutoMlClient.column_spec_path) - parse_column_spec_path = staticmethod(AutoMlClient.parse_column_spec_path) - dataset_path = staticmethod(AutoMlClient.dataset_path) - parse_dataset_path = staticmethod(AutoMlClient.parse_dataset_path) - model_path = staticmethod(AutoMlClient.model_path) - parse_model_path = staticmethod(AutoMlClient.parse_model_path) - model_evaluation_path = staticmethod(AutoMlClient.model_evaluation_path) - parse_model_evaluation_path = staticmethod(AutoMlClient.parse_model_evaluation_path) - table_spec_path = staticmethod(AutoMlClient.table_spec_path) - parse_table_spec_path = staticmethod(AutoMlClient.parse_table_spec_path) - common_billing_account_path = staticmethod(AutoMlClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(AutoMlClient.parse_common_billing_account_path) - common_folder_path = staticmethod(AutoMlClient.common_folder_path) - parse_common_folder_path = staticmethod(AutoMlClient.parse_common_folder_path) - common_organization_path = staticmethod(AutoMlClient.common_organization_path) - parse_common_organization_path = staticmethod(AutoMlClient.parse_common_organization_path) - common_project_path = staticmethod(AutoMlClient.common_project_path) - parse_common_project_path = staticmethod(AutoMlClient.parse_common_project_path) - common_location_path = staticmethod(AutoMlClient.common_location_path) - parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoMlAsyncClient: The constructed client. - """ - return AutoMlClient.from_service_account_info.__func__(AutoMlAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. 
- kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoMlAsyncClient: The constructed client. - """ - return AutoMlClient.from_service_account_file.__func__(AutoMlAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - return AutoMlClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore - - @property - def transport(self) -> AutoMlTransport: - """Returns the transport used by the client instance. - - Returns: - AutoMlTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(AutoMlClient).get_transport_class, type(AutoMlClient)) - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, AutoMlTransport] = "grpc_asyncio", - client_options: Optional[ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the auto ml client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.AutoMlTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = AutoMlClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_dataset(self, - request: Optional[Union[service.CreateDatasetRequest, dict]] = None, - *, - parent: Optional[str] = None, - dataset: Optional[gca_dataset.Dataset] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Creates a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_create_dataset(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - dataset = automl_v1beta1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1beta1.CreateDatasetRequest( - parent="parent_value", - dataset=dataset, - ) - - # Make the request - response = await client.create_dataset(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.CreateDatasetRequest, dict]]): - The request object. Request message for - [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. - parent (:class:`str`): - Required. The resource name of the - project to create the dataset for. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (:class:`google.cloud.automl_v1beta1.types.Dataset`): - Required. The dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.CreateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_dataset(self, - request: Optional[Union[service.GetDatasetRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_get_dataset(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetDatasetRequest( - name="name_value", - ) - - # Make the request - response = await client.get_dataset(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.GetDatasetRequest, dict]]): - The request object. Request message for - [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. - name (:class:`str`): - Required. The resource name of the - dataset to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.GetDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_dataset, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_datasets(self, - request: Optional[Union[service.ListDatasetsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsAsyncPager: - r"""Lists datasets in a project. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_list_datasets(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListDatasetsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_datasets(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.ListDatasetsRequest, dict]]): - The request object. Request message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - parent (:class:`str`): - Required. The resource name of the - project from which to list datasets. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsAsyncPager: - Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ListDatasetsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_datasets, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDatasetsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_dataset(self, - request: Optional[Union[service.UpdateDatasetRequest, dict]] = None, - *, - dataset: Optional[gca_dataset.Dataset] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_update_dataset(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - dataset = automl_v1beta1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1beta1.UpdateDatasetRequest( - dataset=dataset, - ) - - # Make the request - response = await client.update_dataset(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.UpdateDatasetRequest, dict]]): - The request object. Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] - dataset (:class:`google.cloud.automl_v1beta1.types.Dataset`): - Required. The dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.UpdateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_dataset(self, - request: Optional[Union[service.DeleteDatasetRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a dataset and all of its contents. Returns empty - response in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_delete_dataset(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.DeleteDatasetRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_dataset(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.DeleteDatasetRequest, dict]]): - The request object. Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. - name (:class:`str`): - Required. The resource name of the - dataset to delete. 
- - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.DeleteDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_dataset, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def import_data(self, - request: Optional[Union[service.ImportDataRequest, dict]] = None, - *, - name: Optional[str] = None, - input_config: Optional[io.InputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A - [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] - parameter must be explicitly set. Returns an empty response - in the [response][google.longrunning.Operation.response] - field when it completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_import_data(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ImportDataRequest( - name="name_value", - ) - - # Make the request - operation = client.import_data(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.ImportDataRequest, dict]]): - The request object. Request message for - [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. - name (:class:`str`): - Required. Dataset name. Dataset must - already exist. All imported annotations - and examples will be added. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - input_config (:class:`google.cloud.automl_v1beta1.types.InputConfig`): - Required. The desired input location - and its domain specific semantics, if - any. - - This corresponds to the ``input_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, input_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ImportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if input_config is not None: - request.input_config = input_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.import_data, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def export_data(self, - request: Optional[Union[service.ExportDataRequest, dict]] = None, - *, - name: Optional[str] = None, - output_config: Optional[io.OutputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports dataset's data to the provided output location. Returns - an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_export_data(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ExportDataRequest( - name="name_value", - ) - - # Make the request - operation = client.export_data(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.ExportDataRequest, dict]]): - The request object. Request message for - [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. - name (:class:`str`): - Required. The resource name of the - dataset. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (:class:`google.cloud.automl_v1beta1.types.OutputConfig`): - Required. The desired output - location. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ExportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_data, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def get_annotation_spec(self, - request: Optional[Union[service.GetAnnotationSpecRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an annotation spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_get_annotation_spec(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetAnnotationSpecRequest( - name="name_value", - ) - - # Make the request - response = await client.get_annotation_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest, dict]]): - The request object. Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. - name (:class:`str`): - Required. The resource name of the - annotation spec to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.AnnotationSpec: - A definition of an annotation spec. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.GetAnnotationSpecRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_annotation_spec, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_table_spec(self, - request: Optional[Union[service.GetTableSpecRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table_spec.TableSpec: - r"""Gets a table spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_get_table_spec(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetTableSpecRequest( - name="name_value", - ) - - # Make the request - response = await client.get_table_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.GetTableSpecRequest, dict]]): - The request object. Request message for - [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. - name (:class:`str`): - Required. The resource name of the - table spec to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.TableSpec: - A specification of a relational table. - The table's schema is represented via its child - column specs. It is pre-populated as part of - ImportData by schema inference algorithm, the version - of which is a required parameter of ImportData - InputConfig. Note: While working with a table, at - times the schema may be inconsistent with the data in - the table (e.g. string in a FLOAT64 column). 
The - consistency validation is done upon creation of a - model. Used by: \* Tables - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.GetTableSpecRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_table_spec, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_table_specs(self, - request: Optional[Union[service.ListTableSpecsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTableSpecsAsyncPager: - r"""Lists table specs in a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_list_table_specs(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListTableSpecsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_table_specs(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.ListTableSpecsRequest, dict]]): - The request object. Request message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - parent (:class:`str`): - Required. The resource name of the - dataset to list table specs from. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsAsyncPager: - Response message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ListTableSpecsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_table_specs, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTableSpecsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_table_spec(self, - request: Optional[Union[service.UpdateTableSpecRequest, dict]] = None, - *, - table_spec: Optional[gca_table_spec.TableSpec] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_table_spec.TableSpec: - r"""Updates a table spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_update_table_spec(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.UpdateTableSpecRequest( - ) - - # Make the request - response = await client.update_table_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.UpdateTableSpecRequest, dict]]): - The request object. Request message for - [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] - table_spec (:class:`google.cloud.automl_v1beta1.types.TableSpec`): - Required. The table spec which - replaces the resource on the server. 
- - This corresponds to the ``table_spec`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.TableSpec: - A specification of a relational table. - The table's schema is represented via its child - column specs. It is pre-populated as part of - ImportData by schema inference algorithm, the version - of which is a required parameter of ImportData - InputConfig. Note: While working with a table, at - times the schema may be inconsistent with the data in - the table (e.g. string in a FLOAT64 column). The - consistency validation is done upon creation of a - model. Used by: \* Tables - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_spec]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.UpdateTableSpecRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_spec is not None: - request.table_spec = table_spec - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_table_spec, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_spec.name", request.table_spec.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_column_spec(self, - request: Optional[Union[service.GetColumnSpecRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> column_spec.ColumnSpec: - r"""Gets a column spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_get_column_spec(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetColumnSpecRequest( - name="name_value", - ) - - # Make the request - response = await client.get_column_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.GetColumnSpecRequest, dict]]): - The request object. 
Request message for - [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. - name (:class:`str`): - Required. The resource name of the - column spec to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.ColumnSpec: - A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were - given on import . Used by: \* Tables - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.GetColumnSpecRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_column_spec, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_column_specs(self, - request: Optional[Union[service.ListColumnSpecsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListColumnSpecsAsyncPager: - r"""Lists column specs in a table spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_list_column_specs(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListColumnSpecsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_column_specs(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.ListColumnSpecsRequest, dict]]): - The request object. Request message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - parent (:class:`str`): - Required. The resource name of the - table spec to list column specs from. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsAsyncPager: - Response message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ListColumnSpecsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_column_specs, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListColumnSpecsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_column_spec(self, - request: Optional[Union[service.UpdateColumnSpecRequest, dict]] = None, - *, - column_spec: Optional[gca_column_spec.ColumnSpec] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_column_spec.ColumnSpec: - r"""Updates a column spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_update_column_spec(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.UpdateColumnSpecRequest( - ) - - # Make the request - response = await client.update_column_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest, dict]]): - The request object. Request message for - [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] - column_spec (:class:`google.cloud.automl_v1beta1.types.ColumnSpec`): - Required. The column spec which - replaces the resource on the server. - - This corresponds to the ``column_spec`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.ColumnSpec: - A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were - given on import . Used by: \* Tables - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([column_spec]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.UpdateColumnSpecRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if column_spec is not None: - request.column_spec = column_spec - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_column_spec, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("column_spec.name", request.column_spec.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def create_model(self, - request: Optional[Union[service.CreateModelRequest, dict]] = None, - *, - parent: Optional[str] = None, - model: Optional[gca_model.Model] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a model. Returns a Model in the - [response][google.longrunning.Operation.response] field when it - completes. When you create a model, several model evaluations - are created for it: a global evaluation, and one evaluation for - each annotation spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_create_model(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.CreateModelRequest( - parent="parent_value", - ) - - # Make the request - operation = client.create_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.CreateModelRequest, dict]]): - The request object. Request message for - [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. - parent (:class:`str`): - Required. Resource name of the parent - project where the model is being - created. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (:class:`google.cloud.automl_v1beta1.types.Model`): - Required. The model to create. - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.automl_v1beta1.types.Model` API - proto representing a trained machine learning model. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.CreateModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_model.Model, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def get_model(self, - request: Optional[Union[service.GetModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a model. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_get_model(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetModelRequest( - name="name_value", - ) - - # Make the request - response = await client.get_model(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.GetModelRequest, dict]]): - The request object. Request message for - [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. - name (:class:`str`): - Required. Resource name of the model. - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.Model: - API proto representing a trained - machine learning model. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.GetModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_models(self, - request: Optional[Union[service.ListModelsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsAsyncPager: - r"""Lists models. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_list_models(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListModelsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_models(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.ListModelsRequest, dict]]): - The request object. Request message for - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - parent (:class:`str`): - Required. Resource name of the - project, from which to list the models. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsAsyncPager: - Response message for - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ListModelsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_models, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_model(self, - request: Optional[Union[service.DeleteModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a model. Returns ``google.protobuf.Empty`` in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_delete_model(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.DeleteModelRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.DeleteModelRequest, dict]]): - The request object. Request message for - [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. - name (:class:`str`): - Required. Resource name of the model - being deleted. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. 
A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.DeleteModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_model, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def deploy_model(self, - request: Optional[Union[service.DeployModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deploys a model. If a model is already deployed, deploying it - with the same parameters has no effect. Deploying with different - parametrs (as e.g. changing - - [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number]) - will reset the deployment state without pausing the model's - availability. - - Only applicable for Text Classification, Image Object Detection - , Tables, and Image Segmentation; all other domains manage - deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_deploy_model(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.DeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.deploy_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.DeployModelRequest, dict]]): - The request object. Request message for - [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. - name (:class:`str`): - Required. Resource name of the model - to deploy. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.DeployModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.deploy_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. 
- return response - - async def undeploy_model(self, - request: Optional[Union[service.UndeployModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Undeploys a model. If the model is not deployed this method has - no effect. - - Only applicable for Text Classification, Image Object Detection - and Tables; all other domains manage deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_undeploy_model(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.UndeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.undeploy_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.UndeployModelRequest, dict]]): - The request object. Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. - name (:class:`str`): - Required. Resource name of the model - to undeploy. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.UndeployModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undeploy_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def export_model(self, - request: Optional[Union[service.ExportModelRequest, dict]] = None, - *, - name: Optional[str] = None, - output_config: Optional[io.ModelExportOutputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports a trained, "export-able", model to a user specified - Google Cloud Storage location. A model is considered export-able - if and only if it has an export format defined for it in - - [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_export_model(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ExportModelRequest( - name="name_value", - ) - - # Make the request - operation = client.export_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.ExportModelRequest, dict]]): - The request object. Request message for - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an - error code will be returned. - name (:class:`str`): - Required. The resource name of the - model to export. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (:class:`google.cloud.automl_v1beta1.types.ModelExportOutputConfig`): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ExportModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def export_evaluated_examples(self, - request: Optional[Union[service.ExportEvaluatedExamplesRequest, dict]] = None, - *, - name: Optional[str] = None, - output_config: Optional[io.ExportEvaluatedExamplesOutputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports examples on which the model was evaluated (i.e. which - were in the TEST set of the dataset the model was created from), - together with their ground truth annotations and the annotations - created (predicted) by the model. The examples, ground truth and - predictions are exported in the state they were at the moment - the model was evaluated. - - This export is available only for 30 days since the model - evaluation is created. - - Currently only available for Tables. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_export_evaluated_examples(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ExportEvaluatedExamplesRequest( - name="name_value", - ) - - # Make the request - operation = client.export_evaluated_examples(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest, dict]]): - The request object. Request message for - [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. - name (:class:`str`): - Required. The resource name of the - model whose evaluated examples are to be - exported. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (:class:`google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig`): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ExportEvaluatedExamplesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_evaluated_examples, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def get_model_evaluation(self, - request: Optional[Union[service.GetModelEvaluationRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a model evaluation. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_get_model_evaluation(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetModelEvaluationRequest( - name="name_value", - ) - - # Make the request - response = await client.get_model_evaluation(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.GetModelEvaluationRequest, dict]]): - The request object. Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. - name (:class:`str`): - Required. Resource name for the model - evaluation. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.ModelEvaluation: - Evaluation results of a model. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.GetModelEvaluationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_evaluation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_evaluations(self, - request: Optional[Union[service.ListModelEvaluationsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsAsyncPager: - r"""Lists model evaluations. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_list_model_evaluations(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListModelEvaluationsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_model_evaluations(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest, dict]]): - The request object. Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - parent (:class:`str`): - Required. Resource name of the model - to list the model evaluations for. If - modelId is set as "-", this will list - model evaluations from across all models - of the parent location. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager: - Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = service.ListModelEvaluationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_evaluations, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelEvaluationsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self) -> "AutoMlAsyncClient": - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "AutoMlAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/client.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/client.py deleted file mode 100644 index e738a43f..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/client.py +++ /dev/null @@ -1,3335 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast - -from google.cloud.automl_v1beta1 import gapic_version as package_version - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.automl_v1beta1.services.auto_ml import pagers -from google.cloud.automl_v1beta1.types import annotation_spec -from google.cloud.automl_v1beta1.types import classification -from google.cloud.automl_v1beta1.types import column_spec -from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec -from google.cloud.automl_v1beta1.types import data_stats -from google.cloud.automl_v1beta1.types import data_types -from google.cloud.automl_v1beta1.types import dataset -from google.cloud.automl_v1beta1.types import dataset as gca_dataset -from google.cloud.automl_v1beta1.types import detection -from google.cloud.automl_v1beta1.types import image -from 
google.cloud.automl_v1beta1.types import io -from google.cloud.automl_v1beta1.types import model -from google.cloud.automl_v1beta1.types import model as gca_model -from google.cloud.automl_v1beta1.types import model_evaluation -from google.cloud.automl_v1beta1.types import operations -from google.cloud.automl_v1beta1.types import regression -from google.cloud.automl_v1beta1.types import service -from google.cloud.automl_v1beta1.types import table_spec -from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec -from google.cloud.automl_v1beta1.types import tables -from google.cloud.automl_v1beta1.types import text -from google.cloud.automl_v1beta1.types import text_extraction -from google.cloud.automl_v1beta1.types import text_sentiment -from google.cloud.automl_v1beta1.types import translation -from google.cloud.automl_v1beta1.types import video -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import AutoMlGrpcTransport -from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport -from .transports.rest import AutoMlRestTransport - - -class AutoMlClientMeta(type): - """Metaclass for the AutoMl client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[AutoMlTransport]] - _transport_registry["grpc"] = AutoMlGrpcTransport - _transport_registry["grpc_asyncio"] = AutoMlGrpcAsyncIOTransport - _transport_registry["rest"] = AutoMlRestTransport - - def get_transport_class(cls, - label: Optional[str] = None, - ) -> Type[AutoMlTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class AutoMlClient(metaclass=AutoMlClientMeta): - """AutoML Server API. - - The resource names are assigned by the server. The server never - reuses names that it has created after the resources with those - names are deleted. - - An ID of a resource is the last element of the item's resource name. - For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "automl.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoMlClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoMlClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> AutoMlTransport: - """Returns the transport used by the client instance. - - Returns: - AutoMlTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str: - """Returns a fully-qualified annotation_spec string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - - @staticmethod - def parse_annotation_spec_path(path: str) -> Dict[str,str]: - """Parses a annotation_spec path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def column_spec_path(project: str,location: str,dataset: str,table_spec: str,column_spec: str,) -> str: - """Returns a fully-qualified column_spec string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}".format(project=project, location=location, dataset=dataset, table_spec=table_spec, column_spec=column_spec, ) - - @staticmethod - def parse_column_spec_path(path: str) -> Dict[str,str]: - """Parses a column_spec path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/tableSpecs/(?P.+?)/columnSpecs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_evaluation_path(project: str,location: str,model: str,model_evaluation: str,) -> str: - """Returns a fully-qualified model_evaluation string.""" - return "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(project=project, location=location, model=model, model_evaluation=model_evaluation, ) - - @staticmethod - def parse_model_evaluation_path(path: str) -> Dict[str,str]: - """Parses a model_evaluation path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/modelEvaluations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def table_spec_path(project: str,location: str,dataset: str,table_spec: str,) -> str: - """Returns a fully-qualified table_spec string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}".format(project=project, location=location, dataset=dataset, table_spec=table_spec, ) - - @staticmethod - def parse_table_spec_path(path: str) -> Dict[str,str]: - """Parses a table_spec path into its component segments.""" - m = 
re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/tableSpecs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. 
- - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - if client_options is None: - client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") - - # Figure out the client cert source to use. - client_cert_source = None - if use_client_cert == "true": - if client_options.client_cert_source: - client_cert_source = client_options.client_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): - api_endpoint = cls.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = cls.DEFAULT_ENDPOINT - - return api_endpoint, client_cert_source - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, AutoMlTransport]] = None, - client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the auto ml client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, AutoMlTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) - - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) - - api_key_value = getattr(client_options, "api_key", None) - if api_key_value and credentials: - raise ValueError("client_options.api_key and credentials are mutually exclusive") - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, AutoMlTransport): - # transport is a AutoMlTransport instance. - if credentials or client_options.credentials_file or api_key_value: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - import google.auth._default # type: ignore - - if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): - credentials = google.auth._default.get_api_key_credentials(api_key_value) - - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - api_audience=client_options.api_audience, - ) - - def create_dataset(self, - request: Optional[Union[service.CreateDatasetRequest, dict]] = None, - *, - parent: Optional[str] = None, - dataset: Optional[gca_dataset.Dataset] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Creates a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_create_dataset(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - dataset = automl_v1beta1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1beta1.CreateDatasetRequest( - parent="parent_value", - dataset=dataset, - ) - - # Make the request - response = client.create_dataset(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.CreateDatasetRequest, dict]): - The request object. Request message for - [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. 
- parent (str): - Required. The resource name of the - project to create the dataset for. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (google.cloud.automl_v1beta1.types.Dataset): - Required. The dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.CreateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.CreateDatasetRequest): - request = service.CreateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_dataset(self, - request: Optional[Union[service.GetDatasetRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a dataset. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_get_dataset(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetDatasetRequest( - name="name_value", - ) - - # Make the request - response = client.get_dataset(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.GetDatasetRequest, dict]): - The request object. Request message for - [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. - name (str): - Required. The resource name of the - dataset to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.GetDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.GetDatasetRequest): - request = service.GetDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_datasets(self, - request: Optional[Union[service.ListDatasetsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsPager: - r"""Lists datasets in a project. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_list_datasets(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListDatasetsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_datasets(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.ListDatasetsRequest, dict]): - The request object. Request message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - parent (str): - Required. The resource name of the - project from which to list datasets. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsPager: - Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ListDatasetsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ListDatasetsRequest): - request = service.ListDatasetsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_datasets] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDatasetsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_dataset(self, - request: Optional[Union[service.UpdateDatasetRequest, dict]] = None, - *, - dataset: Optional[gca_dataset.Dataset] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a dataset. - - .. 
code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_update_dataset(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - dataset = automl_v1beta1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1beta1.UpdateDatasetRequest( - dataset=dataset, - ) - - # Make the request - response = client.update_dataset(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.UpdateDatasetRequest, dict]): - The request object. Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] - dataset (google.cloud.automl_v1beta1.types.Dataset): - Required. The dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.UpdateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.UpdateDatasetRequest): - request = service.UpdateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def delete_dataset(self, - request: Optional[Union[service.DeleteDatasetRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Deletes a dataset and all of its contents. Returns empty - response in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_delete_dataset(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.DeleteDatasetRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_dataset(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.DeleteDatasetRequest, dict]): - The request object. Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. - name (str): - Required. The resource name of the - dataset to delete. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.DeleteDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.DeleteDatasetRequest): - request = service.DeleteDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.delete_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def import_data(self, - request: Optional[Union[service.ImportDataRequest, dict]] = None, - *, - name: Optional[str] = None, - input_config: Optional[io.InputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A - [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] - parameter must be explicitly set. Returns an empty response - in the [response][google.longrunning.Operation.response] - field when it completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_import_data(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ImportDataRequest( - name="name_value", - ) - - # Make the request - operation = client.import_data(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.ImportDataRequest, dict]): - The request object. Request message for - [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. - name (str): - Required. Dataset name. Dataset must - already exist. All imported annotations - and examples will be added. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - input_config (google.cloud.automl_v1beta1.types.InputConfig): - Required. The desired input location - and its domain specific semantics, if - any. - - This corresponds to the ``input_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. 
A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, input_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ImportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ImportDataRequest): - request = service.ImportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if input_config is not None: - request.input_config = input_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.import_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def export_data(self, - request: Optional[Union[service.ExportDataRequest, dict]] = None, - *, - name: Optional[str] = None, - output_config: Optional[io.OutputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Exports dataset's data to the provided output location. Returns - an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_export_data(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ExportDataRequest( - name="name_value", - ) - - # Make the request - operation = client.export_data(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.ExportDataRequest, dict]): - The request object. Request message for - [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. - name (str): - Required. The resource name of the - dataset. 
- - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.automl_v1beta1.types.OutputConfig): - Required. The desired output - location. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ExportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ExportDataRequest): - request = service.ExportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def get_annotation_spec(self, - request: Optional[Union[service.GetAnnotationSpecRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an annotation spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_get_annotation_spec(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetAnnotationSpecRequest( - name="name_value", - ) - - # Make the request - response = client.get_annotation_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest, dict]): - The request object. Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. - name (str): - Required. The resource name of the - annotation spec to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.AnnotationSpec: - A definition of an annotation spec. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.GetAnnotationSpecRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.GetAnnotationSpecRequest): - request = service.GetAnnotationSpecRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_table_spec(self, - request: Optional[Union[service.GetTableSpecRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table_spec.TableSpec: - r"""Gets a table spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_get_table_spec(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetTableSpecRequest( - name="name_value", - ) - - # Make the request - response = client.get_table_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.GetTableSpecRequest, dict]): - The request object. Request message for - [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. - name (str): - Required. The resource name of the - table spec to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.TableSpec: - A specification of a relational table. - The table's schema is represented via its child - column specs. It is pre-populated as part of - ImportData by schema inference algorithm, the version - of which is a required parameter of ImportData - InputConfig. Note: While working with a table, at - times the schema may be inconsistent with the data in - the table (e.g. string in a FLOAT64 column). The - consistency validation is done upon creation of a - model. Used by: \* Tables - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.GetTableSpecRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.GetTableSpecRequest): - request = service.GetTableSpecRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_table_spec] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_table_specs(self, - request: Optional[Union[service.ListTableSpecsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTableSpecsPager: - r"""Lists table specs in a dataset. - - .. 
code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_list_table_specs(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListTableSpecsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_table_specs(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.ListTableSpecsRequest, dict]): - The request object. Request message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - parent (str): - Required. The resource name of the - dataset to list table specs from. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsPager: - Response message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ListTableSpecsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ListTableSpecsRequest): - request = service.ListTableSpecsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_table_specs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTableSpecsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_table_spec(self, - request: Optional[Union[service.UpdateTableSpecRequest, dict]] = None, - *, - table_spec: Optional[gca_table_spec.TableSpec] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_table_spec.TableSpec: - r"""Updates a table spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_update_table_spec(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.UpdateTableSpecRequest( - ) - - # Make the request - response = client.update_table_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.UpdateTableSpecRequest, dict]): - The request object. Request message for - [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] - table_spec (google.cloud.automl_v1beta1.types.TableSpec): - Required. The table spec which - replaces the resource on the server. - - This corresponds to the ``table_spec`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.TableSpec: - A specification of a relational table. - The table's schema is represented via its child - column specs. It is pre-populated as part of - ImportData by schema inference algorithm, the version - of which is a required parameter of ImportData - InputConfig. Note: While working with a table, at - times the schema may be inconsistent with the data in - the table (e.g. string in a FLOAT64 column). The - consistency validation is done upon creation of a - model. Used by: \* Tables - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_spec]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.UpdateTableSpecRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.UpdateTableSpecRequest): - request = service.UpdateTableSpecRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_spec is not None: - request.table_spec = table_spec - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_table_spec] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_spec.name", request.table_spec.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_column_spec(self, - request: Optional[Union[service.GetColumnSpecRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> column_spec.ColumnSpec: - r"""Gets a column spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_get_column_spec(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetColumnSpecRequest( - name="name_value", - ) - - # Make the request - response = client.get_column_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.GetColumnSpecRequest, dict]): - The request object. Request message for - [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. - name (str): - Required. The resource name of the - column spec to retrieve. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.ColumnSpec: - A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were - given on import . Used by: \* Tables - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.GetColumnSpecRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.GetColumnSpecRequest): - request = service.GetColumnSpecRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.get_column_spec] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_column_specs(self, - request: Optional[Union[service.ListColumnSpecsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListColumnSpecsPager: - r"""Lists column specs in a table spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_list_column_specs(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListColumnSpecsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_column_specs(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.ListColumnSpecsRequest, dict]): - The request object. Request message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - parent (str): - Required. The resource name of the - table spec to list column specs from. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsPager: - Response message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ListColumnSpecsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ListColumnSpecsRequest): - request = service.ListColumnSpecsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_column_specs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListColumnSpecsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_column_spec(self, - request: Optional[Union[service.UpdateColumnSpecRequest, dict]] = None, - *, - column_spec: Optional[gca_column_spec.ColumnSpec] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_column_spec.ColumnSpec: - r"""Updates a column spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_update_column_spec(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.UpdateColumnSpecRequest( - ) - - # Make the request - response = client.update_column_spec(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest, dict]): - The request object. Request message for - [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] - column_spec (google.cloud.automl_v1beta1.types.ColumnSpec): - Required. The column spec which - replaces the resource on the server. - - This corresponds to the ``column_spec`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.ColumnSpec: - A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were - given on import . Used by: \* Tables - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([column_spec]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.UpdateColumnSpecRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.UpdateColumnSpecRequest): - request = service.UpdateColumnSpecRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if column_spec is not None: - request.column_spec = column_spec - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_column_spec] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("column_spec.name", request.column_spec.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_model(self, - request: Optional[Union[service.CreateModelRequest, dict]] = None, - *, - parent: Optional[str] = None, - model: Optional[gca_model.Model] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Creates a model. Returns a Model in the - [response][google.longrunning.Operation.response] field when it - completes. When you create a model, several model evaluations - are created for it: a global evaluation, and one evaluation for - each annotation spec. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_create_model(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.CreateModelRequest( - parent="parent_value", - ) - - # Make the request - operation = client.create_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.CreateModelRequest, dict]): - The request object. Request message for - [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. - parent (str): - Required. Resource name of the parent - project where the model is being - created. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (google.cloud.automl_v1beta1.types.Model): - Required. The model to create. - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.automl_v1beta1.types.Model` API - proto representing a trained machine learning model. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.CreateModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.CreateModelRequest): - request = service.CreateModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - gca_model.Model, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def get_model(self, - request: Optional[Union[service.GetModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a model. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_get_model(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetModelRequest( - name="name_value", - ) - - # Make the request - response = client.get_model(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.GetModelRequest, dict]): - The request object. Request message for - [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. - name (str): - Required. Resource name of the model. - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.automl_v1beta1.types.Model: - API proto representing a trained - machine learning model. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.GetModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.GetModelRequest): - request = service.GetModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_models(self, - request: Optional[Union[service.ListModelsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsPager: - r"""Lists models. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_list_models(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListModelsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_models(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.ListModelsRequest, dict]): - The request object. Request message for - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - parent (str): - Required. Resource name of the - project, from which to list the models. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsPager: - Response message for - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. 
- - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ListModelsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ListModelsRequest): - request = service.ListModelsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_models] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_model(self, - request: Optional[Union[service.DeleteModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Deletes a model. Returns ``google.protobuf.Empty`` in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_delete_model(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.DeleteModelRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.DeleteModelRequest, dict]): - The request object. Request message for - [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. - name (str): - Required. Resource name of the model - being deleted. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.DeleteModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.DeleteModelRequest): - request = service.DeleteModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def deploy_model(self, - request: Optional[Union[service.DeployModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Deploys a model. If a model is already deployed, deploying it - with the same parameters has no effect. Deploying with different - parametrs (as e.g. changing - - [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number]) - will reset the deployment state without pausing the model's - availability. - - Only applicable for Text Classification, Image Object Detection - , Tables, and Image Segmentation; all other domains manage - deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_deploy_model(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.DeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.deploy_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.DeployModelRequest, dict]): - The request object. Request message for - [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. - name (str): - Required. Resource name of the model - to deploy. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.DeployModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.DeployModelRequest): - request = service.DeployModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.deploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. 
- return response - - def undeploy_model(self, - request: Optional[Union[service.UndeployModelRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Undeploys a model. If the model is not deployed this method has - no effect. - - Only applicable for Text Classification, Image Object Detection - and Tables; all other domains manage deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_undeploy_model(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.UndeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.undeploy_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.UndeployModelRequest, dict]): - The request object. Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. - name (str): - Required. Resource name of the model - to undeploy. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.UndeployModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.UndeployModelRequest): - request = service.UndeployModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.undeploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def export_model(self, - request: Optional[Union[service.ExportModelRequest, dict]] = None, - *, - name: Optional[str] = None, - output_config: Optional[io.ModelExportOutputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Exports a trained, "export-able", model to a user specified - Google Cloud Storage location. A model is considered export-able - if and only if it has an export format defined for it in - - [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_export_model(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ExportModelRequest( - name="name_value", - ) - - # Make the request - operation = client.export_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.ExportModelRequest, dict]): - The request object. Request message for - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an - error code will be returned. - name (str): - Required. The resource name of the - model to export. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.automl_v1beta1.types.ModelExportOutputConfig): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ExportModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ExportModelRequest): - request = service.ExportModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def export_evaluated_examples(self, - request: Optional[Union[service.ExportEvaluatedExamplesRequest, dict]] = None, - *, - name: Optional[str] = None, - output_config: Optional[io.ExportEvaluatedExamplesOutputConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Exports examples on which the model was evaluated (i.e. which - were in the TEST set of the dataset the model was created from), - together with their ground truth annotations and the annotations - created (predicted) by the model. The examples, ground truth and - predictions are exported in the state they were at the moment - the model was evaluated. - - This export is available only for 30 days since the model - evaluation is created. - - Currently only available for Tables. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_export_evaluated_examples(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ExportEvaluatedExamplesRequest( - name="name_value", - ) - - # Make the request - operation = client.export_evaluated_examples(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest, dict]): - The request object. Request message for - [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. - name (str): - Required. The resource name of the - model whose evaluated examples are to be - exported. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ExportEvaluatedExamplesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ExportEvaluatedExamplesRequest): - request = service.ExportEvaluatedExamplesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_evaluated_examples] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def get_model_evaluation(self, - request: Optional[Union[service.GetModelEvaluationRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a model evaluation. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_get_model_evaluation(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.GetModelEvaluationRequest( - name="name_value", - ) - - # Make the request - response = client.get_model_evaluation(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.GetModelEvaluationRequest, dict]): - The request object. Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. - name (str): - Required. Resource name for the model - evaluation. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.ModelEvaluation: - Evaluation results of a model. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.GetModelEvaluationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.GetModelEvaluationRequest): - request = service.GetModelEvaluationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_evaluations(self, - request: Optional[Union[service.ListModelEvaluationsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsPager: - r"""Lists model evaluations. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_list_model_evaluations(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListModelEvaluationsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_model_evaluations(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest, dict]): - The request object. Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - parent (str): - Required. Resource name of the model - to list the model evaluations for. If - modelId is set as "-", this will list - model evaluations from across all models - of the parent location. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsPager: - Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a service.ListModelEvaluationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, service.ListModelEvaluationsRequest): - request = service.ListModelEvaluationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelEvaluationsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self) -> "AutoMlClient": - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - - - - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "AutoMlClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/pagers.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/pagers.py deleted file mode 100644 index 4d4d2676..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/pagers.py +++ /dev/null @@ -1,628 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.automl_v1beta1.types import column_spec -from google.cloud.automl_v1beta1.types import dataset -from google.cloud.automl_v1beta1.types import model -from google.cloud.automl_v1beta1.types import model_evaluation -from google.cloud.automl_v1beta1.types import service -from google.cloud.automl_v1beta1.types import table_spec - - -class ListDatasetsPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., service.ListDatasetsResponse], - request: service.ListDatasetsRequest, - response: service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1beta1.types.ListDatasetsRequest): - The initial request object. - response (google.cloud.automl_v1beta1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[dataset.Dataset]: - for page in self.pages: - yield from page.datasets - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDatasetsAsyncPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[service.ListDatasetsResponse]], - request: service.ListDatasetsRequest, - response: service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1beta1.types.ListDatasetsRequest): - The initial request object. - response (google.cloud.automl_v1beta1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[dataset.Dataset]: - async def async_generator(): - async for page in self.pages: - for response in page.datasets: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTableSpecsPager: - """A pager for iterating through ``list_table_specs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``table_specs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTableSpecs`` requests and continue to iterate - through the ``table_specs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., service.ListTableSpecsResponse], - request: service.ListTableSpecsRequest, - response: service.ListTableSpecsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1beta1.types.ListTableSpecsRequest): - The initial request object. - response (google.cloud.automl_v1beta1.types.ListTableSpecsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListTableSpecsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[service.ListTableSpecsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[table_spec.TableSpec]: - for page in self.pages: - yield from page.table_specs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTableSpecsAsyncPager: - """A pager for iterating through ``list_table_specs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``table_specs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTableSpecs`` requests and continue to iterate - through the ``table_specs`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[service.ListTableSpecsResponse]], - request: service.ListTableSpecsRequest, - response: service.ListTableSpecsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1beta1.types.ListTableSpecsRequest): - The initial request object. - response (google.cloud.automl_v1beta1.types.ListTableSpecsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListTableSpecsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[service.ListTableSpecsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[table_spec.TableSpec]: - async def async_generator(): - async for page in self.pages: - for response in page.table_specs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListColumnSpecsPager: - """A pager for iterating through ``list_column_specs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``column_specs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListColumnSpecs`` requests and continue to iterate - through the ``column_specs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., service.ListColumnSpecsResponse], - request: service.ListColumnSpecsRequest, - response: service.ListColumnSpecsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1beta1.types.ListColumnSpecsRequest): - The initial request object. - response (google.cloud.automl_v1beta1.types.ListColumnSpecsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = service.ListColumnSpecsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[service.ListColumnSpecsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[column_spec.ColumnSpec]: - for page in self.pages: - yield from page.column_specs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListColumnSpecsAsyncPager: - """A pager for iterating through ``list_column_specs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``column_specs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListColumnSpecs`` requests and continue to iterate - through the ``column_specs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[service.ListColumnSpecsResponse]], - request: service.ListColumnSpecsRequest, - response: service.ListColumnSpecsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1beta1.types.ListColumnSpecsRequest): - The initial request object. - response (google.cloud.automl_v1beta1.types.ListColumnSpecsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListColumnSpecsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[service.ListColumnSpecsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[column_spec.ColumnSpec]: - async def async_generator(): - async for page in self.pages: - for response in page.column_specs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelsPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``model`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., service.ListModelsResponse], - request: service.ListModelsRequest, - response: service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1beta1.types.ListModelsRequest): - The initial request object. - response (google.cloud.automl_v1beta1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model.Model]: - for page in self.pages: - yield from page.model - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelsAsyncPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``model`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[service.ListModelsResponse]], - request: service.ListModelsRequest, - response: service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1beta1.types.ListModelsRequest): - The initial request object. - response (google.cloud.automl_v1beta1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[model.Model]: - async def async_generator(): - async for page in self.pages: - for response in page.model: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_evaluation`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluation`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., service.ListModelEvaluationsResponse], - request: service.ListModelEvaluationsRequest, - response: service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model_evaluation.ModelEvaluation]: - for page in self.pages: - yield from page.model_evaluation - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsAsyncPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_evaluation`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluation`` field on the - corresponding responses. - - All the usual :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[service.ListModelEvaluationsResponse]], - request: service.ListModelEvaluationsRequest, - response: service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[model_evaluation.ModelEvaluation]: - async def async_generator(): - async for page in self.pages: - for response in page.model_evaluation: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py deleted file mode 100644 index 9d86479d..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import AutoMlTransport -from .grpc import AutoMlGrpcTransport -from .grpc_asyncio import AutoMlGrpcAsyncIOTransport -from .rest import AutoMlRestTransport -from .rest import AutoMlRestInterceptor - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[AutoMlTransport]] -_transport_registry['grpc'] = AutoMlGrpcTransport -_transport_registry['grpc_asyncio'] = AutoMlGrpcAsyncIOTransport -_transport_registry['rest'] = AutoMlRestTransport - -__all__ = ( - 'AutoMlTransport', - 'AutoMlGrpcTransport', - 'AutoMlGrpcAsyncIOTransport', - 'AutoMlRestTransport', - 'AutoMlRestInterceptor', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py deleted file mode 100644 index 2ed29d7c..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py +++ /dev/null @@ -1,570 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union - -from google.cloud.automl_v1beta1 import gapic_version as package_version - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.automl_v1beta1.types import annotation_spec -from google.cloud.automl_v1beta1.types import column_spec -from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec -from google.cloud.automl_v1beta1.types import dataset -from google.cloud.automl_v1beta1.types import dataset as gca_dataset -from google.cloud.automl_v1beta1.types import model -from google.cloud.automl_v1beta1.types import model_evaluation -from google.cloud.automl_v1beta1.types import service -from google.cloud.automl_v1beta1.types import table_spec -from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec -from google.longrunning import operations_pb2 # type: ignore - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -class AutoMlTransport(abc.ABC): - """Abstract transport class for AutoMl.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'automl.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - # Don't apply audience if the credentials file passed from user. - if hasattr(credentials, "with_gdch_audience"): - credentials = credentials.with_gdch_audience(api_audience if api_audience else host) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_dataset: gapic_v1.method.wrap_method( - self.create_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.get_dataset: gapic_v1.method.wrap_method( - self.get_dataset, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.list_datasets: gapic_v1.method.wrap_method( - self.list_datasets, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.update_dataset: gapic_v1.method.wrap_method( - self.update_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_dataset: gapic_v1.method.wrap_method( - self.delete_dataset, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.import_data: gapic_v1.method.wrap_method( - self.import_data, - default_timeout=5.0, - client_info=client_info, - ), - self.export_data: gapic_v1.method.wrap_method( - self.export_data, - default_timeout=5.0, - client_info=client_info, - ), - self.get_annotation_spec: gapic_v1.method.wrap_method( - self.get_annotation_spec, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.get_table_spec: gapic_v1.method.wrap_method( - self.get_table_spec, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.list_table_specs: gapic_v1.method.wrap_method( - self.list_table_specs, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.update_table_spec: gapic_v1.method.wrap_method( - self.update_table_spec, - default_timeout=5.0, - client_info=client_info, - ), - self.get_column_spec: gapic_v1.method.wrap_method( - self.get_column_spec, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.list_column_specs: gapic_v1.method.wrap_method( - self.list_column_specs, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.update_column_spec: gapic_v1.method.wrap_method( - self.update_column_spec, - default_timeout=5.0, - client_info=client_info, - ), - 
self.create_model: gapic_v1.method.wrap_method( - self.create_model, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model: gapic_v1.method.wrap_method( - self.get_model, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.list_models: gapic_v1.method.wrap_method( - self.list_models, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.delete_model: gapic_v1.method.wrap_method( - self.delete_model, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.deploy_model: gapic_v1.method.wrap_method( - self.deploy_model, - default_timeout=5.0, - client_info=client_info, - ), - self.undeploy_model: gapic_v1.method.wrap_method( - self.undeploy_model, - default_timeout=5.0, - client_info=client_info, - ), - self.export_model: gapic_v1.method.wrap_method( - self.export_model, - default_timeout=5.0, - client_info=client_info, - ), - self.export_evaluated_examples: gapic_v1.method.wrap_method( - self.export_evaluated_examples, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model_evaluation: gapic_v1.method.wrap_method( - self.get_model_evaluation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=5.0, - ), - default_timeout=5.0, - client_info=client_info, - ), - self.list_model_evaluations: gapic_v1.method.wrap_method( - self.list_model_evaluations, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_dataset(self) -> Callable[ - [service.CreateDatasetRequest], - Union[ - gca_dataset.Dataset, - Awaitable[gca_dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def get_dataset(self) -> Callable[ - [service.GetDatasetRequest], - Union[ - dataset.Dataset, - Awaitable[dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def list_datasets(self) -> Callable[ - [service.ListDatasetsRequest], - Union[ - service.ListDatasetsResponse, - Awaitable[service.ListDatasetsResponse] - ]]: - raise NotImplementedError() - - @property - def update_dataset(self) -> Callable[ - [service.UpdateDatasetRequest], - Union[ - gca_dataset.Dataset, - Awaitable[gca_dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def delete_dataset(self) -> Callable[ - [service.DeleteDatasetRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def import_data(self) -> Callable[ - [service.ImportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_data(self) -> Callable[ - [service.ExportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_annotation_spec(self) -> Callable[ - [service.GetAnnotationSpecRequest], - Union[ - annotation_spec.AnnotationSpec, - Awaitable[annotation_spec.AnnotationSpec] - ]]: - raise NotImplementedError() - - @property - def get_table_spec(self) -> Callable[ - [service.GetTableSpecRequest], - Union[ - table_spec.TableSpec, - Awaitable[table_spec.TableSpec] - ]]: - raise NotImplementedError() - - @property - def list_table_specs(self) -> Callable[ - [service.ListTableSpecsRequest], - Union[ - service.ListTableSpecsResponse, - Awaitable[service.ListTableSpecsResponse] - ]]: - raise NotImplementedError() - - @property - def update_table_spec(self) -> Callable[ - [service.UpdateTableSpecRequest], - Union[ - gca_table_spec.TableSpec, - Awaitable[gca_table_spec.TableSpec] - ]]: - raise NotImplementedError() - - @property - def get_column_spec(self) -> Callable[ - [service.GetColumnSpecRequest], - Union[ - column_spec.ColumnSpec, - Awaitable[column_spec.ColumnSpec] - ]]: - raise NotImplementedError() - - @property - def list_column_specs(self) -> Callable[ - [service.ListColumnSpecsRequest], - Union[ - service.ListColumnSpecsResponse, - Awaitable[service.ListColumnSpecsResponse] - ]]: - raise NotImplementedError() - - @property - def update_column_spec(self) -> Callable[ - [service.UpdateColumnSpecRequest], - Union[ - gca_column_spec.ColumnSpec, - Awaitable[gca_column_spec.ColumnSpec] - ]]: - raise NotImplementedError() - - @property - def create_model(self) -> Callable[ - [service.CreateModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model(self) -> Callable[ - [service.GetModelRequest], - Union[ - model.Model, - Awaitable[model.Model] - ]]: - raise NotImplementedError() - - @property - def list_models(self) -> Callable[ - [service.ListModelsRequest], - Union[ - service.ListModelsResponse, - Awaitable[service.ListModelsResponse] - ]]: - raise NotImplementedError() - - @property - def 
delete_model(self) -> Callable[ - [service.DeleteModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def deploy_model(self) -> Callable[ - [service.DeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def undeploy_model(self) -> Callable[ - [service.UndeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_model(self) -> Callable[ - [service.ExportModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_evaluated_examples(self) -> Callable[ - [service.ExportEvaluatedExamplesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model_evaluation(self) -> Callable[ - [service.GetModelEvaluationRequest], - Union[ - model_evaluation.ModelEvaluation, - Awaitable[model_evaluation.ModelEvaluation] - ]]: - raise NotImplementedError() - - @property - def list_model_evaluations(self) -> Callable[ - [service.ListModelEvaluationsRequest], - Union[ - service.ListModelEvaluationsResponse, - Awaitable[service.ListModelEvaluationsResponse] - ]]: - raise NotImplementedError() - - @property - def kind(self) -> str: - raise NotImplementedError() - - -__all__ = ( - 'AutoMlTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py deleted file mode 100644 index f9aa5e51..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py +++ /dev/null @@ -1,971 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.automl_v1beta1.types import annotation_spec -from google.cloud.automl_v1beta1.types import column_spec -from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec -from google.cloud.automl_v1beta1.types import dataset -from google.cloud.automl_v1beta1.types import dataset as gca_dataset -from google.cloud.automl_v1beta1.types import model -from google.cloud.automl_v1beta1.types import model_evaluation -from google.cloud.automl_v1beta1.types import service -from google.cloud.automl_v1beta1.types import table_spec -from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec -from google.longrunning import operations_pb2 # type: ignore -from .base import AutoMlTransport, DEFAULT_CLIENT_INFO - - -class AutoMlGrpcTransport(AutoMlTransport): - """gRPC backend transport for AutoMl. - - AutoML Server API. - - The resource names are assigned by the server. The server never - reuses names that it has created after the resources with those - names are deleted. - - An ID of a resource is the last element of the item's resource name. - For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. 
- scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [service.CreateDatasetRequest], - gca_dataset.Dataset]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a dataset. - - Returns: - Callable[[~.CreateDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/CreateDataset', - request_serializer=service.CreateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [service.GetDatasetRequest], - dataset.Dataset]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a dataset. - - Returns: - Callable[[~.GetDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetDataset', - request_serializer=service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def list_datasets(self) -> Callable[ - [service.ListDatasetsRequest], - service.ListDatasetsResponse]: - r"""Return a callable for the list datasets method over gRPC. - - Lists datasets in a project. - - Returns: - Callable[[~.ListDatasetsRequest], - ~.ListDatasetsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ListDatasets', - request_serializer=service.ListDatasetsRequest.serialize, - response_deserializer=service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def update_dataset(self) -> Callable[ - [service.UpdateDatasetRequest], - gca_dataset.Dataset]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/UpdateDataset', - request_serializer=service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def delete_dataset(self) -> Callable[ - [service.DeleteDatasetRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a dataset and all of its contents. Returns empty - response in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - Returns: - Callable[[~.DeleteDatasetRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/DeleteDataset', - request_serializer=service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [service.ImportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a dataset. For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A - [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] - parameter must be explicitly set. Returns an empty response - in the [response][google.longrunning.Operation.response] - field when it completes. - - Returns: - Callable[[~.ImportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ImportData', - request_serializer=service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [service.ExportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the export data method over gRPC. - - Exports dataset's data to the provided output location. Returns - an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.ExportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ExportData', - request_serializer=service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def get_annotation_spec(self) -> Callable[ - [service.GetAnnotationSpecRequest], - annotation_spec.AnnotationSpec]: - r"""Return a callable for the get annotation spec method over gRPC. - - Gets an annotation spec. - - Returns: - Callable[[~.GetAnnotationSpecRequest], - ~.AnnotationSpec]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_annotation_spec' not in self._stubs: - self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetAnnotationSpec', - request_serializer=service.GetAnnotationSpecRequest.serialize, - response_deserializer=annotation_spec.AnnotationSpec.deserialize, - ) - return self._stubs['get_annotation_spec'] - - @property - def get_table_spec(self) -> Callable[ - [service.GetTableSpecRequest], - table_spec.TableSpec]: - r"""Return a callable for the get table spec method over gRPC. - - Gets a table spec. - - Returns: - Callable[[~.GetTableSpecRequest], - ~.TableSpec]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_table_spec' not in self._stubs: - self._stubs['get_table_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetTableSpec', - request_serializer=service.GetTableSpecRequest.serialize, - response_deserializer=table_spec.TableSpec.deserialize, - ) - return self._stubs['get_table_spec'] - - @property - def list_table_specs(self) -> Callable[ - [service.ListTableSpecsRequest], - service.ListTableSpecsResponse]: - r"""Return a callable for the list table specs method over gRPC. - - Lists table specs in a dataset. - - Returns: - Callable[[~.ListTableSpecsRequest], - ~.ListTableSpecsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_table_specs' not in self._stubs: - self._stubs['list_table_specs'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ListTableSpecs', - request_serializer=service.ListTableSpecsRequest.serialize, - response_deserializer=service.ListTableSpecsResponse.deserialize, - ) - return self._stubs['list_table_specs'] - - @property - def update_table_spec(self) -> Callable[ - [service.UpdateTableSpecRequest], - gca_table_spec.TableSpec]: - r"""Return a callable for the update table spec method over gRPC. - - Updates a table spec. - - Returns: - Callable[[~.UpdateTableSpecRequest], - ~.TableSpec]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_table_spec' not in self._stubs: - self._stubs['update_table_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/UpdateTableSpec', - request_serializer=service.UpdateTableSpecRequest.serialize, - response_deserializer=gca_table_spec.TableSpec.deserialize, - ) - return self._stubs['update_table_spec'] - - @property - def get_column_spec(self) -> Callable[ - [service.GetColumnSpecRequest], - column_spec.ColumnSpec]: - r"""Return a callable for the get column spec method over gRPC. - - Gets a column spec. - - Returns: - Callable[[~.GetColumnSpecRequest], - ~.ColumnSpec]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_column_spec' not in self._stubs: - self._stubs['get_column_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetColumnSpec', - request_serializer=service.GetColumnSpecRequest.serialize, - response_deserializer=column_spec.ColumnSpec.deserialize, - ) - return self._stubs['get_column_spec'] - - @property - def list_column_specs(self) -> Callable[ - [service.ListColumnSpecsRequest], - service.ListColumnSpecsResponse]: - r"""Return a callable for the list column specs method over gRPC. - - Lists column specs in a table spec. - - Returns: - Callable[[~.ListColumnSpecsRequest], - ~.ListColumnSpecsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_column_specs' not in self._stubs: - self._stubs['list_column_specs'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ListColumnSpecs', - request_serializer=service.ListColumnSpecsRequest.serialize, - response_deserializer=service.ListColumnSpecsResponse.deserialize, - ) - return self._stubs['list_column_specs'] - - @property - def update_column_spec(self) -> Callable[ - [service.UpdateColumnSpecRequest], - gca_column_spec.ColumnSpec]: - r"""Return a callable for the update column spec method over gRPC. - - Updates a column spec. - - Returns: - Callable[[~.UpdateColumnSpecRequest], - ~.ColumnSpec]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_column_spec' not in self._stubs: - self._stubs['update_column_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/UpdateColumnSpec', - request_serializer=service.UpdateColumnSpecRequest.serialize, - response_deserializer=gca_column_spec.ColumnSpec.deserialize, - ) - return self._stubs['update_column_spec'] - - @property - def create_model(self) -> Callable[ - [service.CreateModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the create model method over gRPC. - - Creates a model. Returns a Model in the - [response][google.longrunning.Operation.response] field when it - completes. 
When you create a model, several model evaluations - are created for it: a global evaluation, and one evaluation for - each annotation spec. - - Returns: - Callable[[~.CreateModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_model' not in self._stubs: - self._stubs['create_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/CreateModel', - request_serializer=service.CreateModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_model'] - - @property - def get_model(self) -> Callable[ - [service.GetModelRequest], - model.Model]: - r"""Return a callable for the get model method over gRPC. - - Gets a model. - - Returns: - Callable[[~.GetModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetModel', - request_serializer=service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [service.ListModelsRequest], - service.ListModelsResponse]: - r"""Return a callable for the list models method over gRPC. - - Lists models. - - Returns: - Callable[[~.ListModelsRequest], - ~.ListModelsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ListModels', - request_serializer=service.ListModelsRequest.serialize, - response_deserializer=service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def delete_model(self) -> Callable[ - [service.DeleteModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete model method over gRPC. - - Deletes a model. Returns ``google.protobuf.Empty`` in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - Returns: - Callable[[~.DeleteModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_model' not in self._stubs: - self._stubs['delete_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/DeleteModel', - request_serializer=service.DeleteModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model'] - - @property - def deploy_model(self) -> Callable[ - [service.DeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a model. If a model is already deployed, deploying it - with the same parameters has no effect. Deploying with different - parametrs (as e.g. changing - - [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number]) - will reset the deployment state without pausing the model's - availability. - - Only applicable for Text Classification, Image Object Detection - , Tables, and Image Segmentation; all other domains manage - deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.DeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/DeployModel', - request_serializer=service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [service.UndeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a model. If the model is not deployed this method has - no effect. - - Only applicable for Text Classification, Image Object Detection - and Tables; all other domains manage deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.UndeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/UndeployModel', - request_serializer=service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - @property - def export_model(self) -> Callable[ - [service.ExportModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the export model method over gRPC. - - Exports a trained, "export-able", model to a user specified - Google Cloud Storage location. A model is considered export-able - if and only if it has an export format defined for it in - - [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. 
- - Returns: - Callable[[~.ExportModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_model' not in self._stubs: - self._stubs['export_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ExportModel', - request_serializer=service.ExportModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_model'] - - @property - def export_evaluated_examples(self) -> Callable[ - [service.ExportEvaluatedExamplesRequest], - operations_pb2.Operation]: - r"""Return a callable for the export evaluated examples method over gRPC. - - Exports examples on which the model was evaluated (i.e. which - were in the TEST set of the dataset the model was created from), - together with their ground truth annotations and the annotations - created (predicted) by the model. The examples, ground truth and - predictions are exported in the state they were at the moment - the model was evaluated. - - This export is available only for 30 days since the model - evaluation is created. - - Currently only available for Tables. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.ExportEvaluatedExamplesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_evaluated_examples' not in self._stubs: - self._stubs['export_evaluated_examples'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples', - request_serializer=service.ExportEvaluatedExamplesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_evaluated_examples'] - - @property - def get_model_evaluation(self) -> Callable[ - [service.GetModelEvaluationRequest], - model_evaluation.ModelEvaluation]: - r"""Return a callable for the get model evaluation method over gRPC. - - Gets a model evaluation. - - Returns: - Callable[[~.GetModelEvaluationRequest], - ~.ModelEvaluation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetModelEvaluation', - request_serializer=service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [service.ListModelEvaluationsRequest], - service.ListModelEvaluationsResponse]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists model evaluations. 
- - Returns: - Callable[[~.ListModelEvaluationsRequest], - ~.ListModelEvaluationsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ListModelEvaluations', - request_serializer=service.ListModelEvaluationsRequest.serialize, - response_deserializer=service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - def close(self): - self.grpc_channel.close() - - @property - def kind(self) -> str: - return "grpc" - - -__all__ = ( - 'AutoMlGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py deleted file mode 100644 index bfb03112..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py +++ /dev/null @@ -1,970 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.automl_v1beta1.types import annotation_spec -from google.cloud.automl_v1beta1.types import column_spec -from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec -from google.cloud.automl_v1beta1.types import dataset -from google.cloud.automl_v1beta1.types import dataset as gca_dataset -from google.cloud.automl_v1beta1.types import model -from google.cloud.automl_v1beta1.types import model_evaluation -from google.cloud.automl_v1beta1.types import service -from google.cloud.automl_v1beta1.types import table_spec -from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec -from google.longrunning import operations_pb2 # type: ignore -from .base import AutoMlTransport, DEFAULT_CLIENT_INFO -from .grpc import AutoMlGrpcTransport - - -class AutoMlGrpcAsyncIOTransport(AutoMlTransport): - """gRPC AsyncIO backend transport for AutoMl. - - AutoML Server API. - - The resource names are assigned by the server. The server never - reuses names that it has created after the resources with those - names are deleted. - - An ID of a resource is the last element of the item's resource name. 
- For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. 
- This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [service.CreateDatasetRequest], - Awaitable[gca_dataset.Dataset]]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a dataset. - - Returns: - Callable[[~.CreateDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/CreateDataset', - request_serializer=service.CreateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [service.GetDatasetRequest], - Awaitable[dataset.Dataset]]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a dataset. - - Returns: - Callable[[~.GetDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetDataset', - request_serializer=service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def list_datasets(self) -> Callable[ - [service.ListDatasetsRequest], - Awaitable[service.ListDatasetsResponse]]: - r"""Return a callable for the list datasets method over gRPC. - - Lists datasets in a project. - - Returns: - Callable[[~.ListDatasetsRequest], - Awaitable[~.ListDatasetsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ListDatasets', - request_serializer=service.ListDatasetsRequest.serialize, - response_deserializer=service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def update_dataset(self) -> Callable[ - [service.UpdateDatasetRequest], - Awaitable[gca_dataset.Dataset]]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/UpdateDataset', - request_serializer=service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def delete_dataset(self) -> Callable[ - [service.DeleteDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a dataset and all of its contents. Returns empty - response in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - Returns: - Callable[[~.DeleteDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/DeleteDataset', - request_serializer=service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [service.ImportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a dataset. 
For Tables this method can only be - called on an empty Dataset. - - For Tables: - - - A - [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params] - parameter must be explicitly set. Returns an empty response - in the [response][google.longrunning.Operation.response] - field when it completes. - - Returns: - Callable[[~.ImportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ImportData', - request_serializer=service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [service.ExportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export data method over gRPC. - - Exports dataset's data to the provided output location. Returns - an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.ExportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ExportData', - request_serializer=service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def get_annotation_spec(self) -> Callable[ - [service.GetAnnotationSpecRequest], - Awaitable[annotation_spec.AnnotationSpec]]: - r"""Return a callable for the get annotation spec method over gRPC. - - Gets an annotation spec. - - Returns: - Callable[[~.GetAnnotationSpecRequest], - Awaitable[~.AnnotationSpec]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_annotation_spec' not in self._stubs: - self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetAnnotationSpec', - request_serializer=service.GetAnnotationSpecRequest.serialize, - response_deserializer=annotation_spec.AnnotationSpec.deserialize, - ) - return self._stubs['get_annotation_spec'] - - @property - def get_table_spec(self) -> Callable[ - [service.GetTableSpecRequest], - Awaitable[table_spec.TableSpec]]: - r"""Return a callable for the get table spec method over gRPC. - - Gets a table spec. - - Returns: - Callable[[~.GetTableSpecRequest], - Awaitable[~.TableSpec]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_table_spec' not in self._stubs: - self._stubs['get_table_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetTableSpec', - request_serializer=service.GetTableSpecRequest.serialize, - response_deserializer=table_spec.TableSpec.deserialize, - ) - return self._stubs['get_table_spec'] - - @property - def list_table_specs(self) -> Callable[ - [service.ListTableSpecsRequest], - Awaitable[service.ListTableSpecsResponse]]: - r"""Return a callable for the list table specs method over gRPC. - - Lists table specs in a dataset. - - Returns: - Callable[[~.ListTableSpecsRequest], - Awaitable[~.ListTableSpecsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_table_specs' not in self._stubs: - self._stubs['list_table_specs'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ListTableSpecs', - request_serializer=service.ListTableSpecsRequest.serialize, - response_deserializer=service.ListTableSpecsResponse.deserialize, - ) - return self._stubs['list_table_specs'] - - @property - def update_table_spec(self) -> Callable[ - [service.UpdateTableSpecRequest], - Awaitable[gca_table_spec.TableSpec]]: - r"""Return a callable for the update table spec method over gRPC. - - Updates a table spec. - - Returns: - Callable[[~.UpdateTableSpecRequest], - Awaitable[~.TableSpec]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_table_spec' not in self._stubs: - self._stubs['update_table_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/UpdateTableSpec', - request_serializer=service.UpdateTableSpecRequest.serialize, - response_deserializer=gca_table_spec.TableSpec.deserialize, - ) - return self._stubs['update_table_spec'] - - @property - def get_column_spec(self) -> Callable[ - [service.GetColumnSpecRequest], - Awaitable[column_spec.ColumnSpec]]: - r"""Return a callable for the get column spec method over gRPC. - - Gets a column spec. - - Returns: - Callable[[~.GetColumnSpecRequest], - Awaitable[~.ColumnSpec]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_column_spec' not in self._stubs: - self._stubs['get_column_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetColumnSpec', - request_serializer=service.GetColumnSpecRequest.serialize, - response_deserializer=column_spec.ColumnSpec.deserialize, - ) - return self._stubs['get_column_spec'] - - @property - def list_column_specs(self) -> Callable[ - [service.ListColumnSpecsRequest], - Awaitable[service.ListColumnSpecsResponse]]: - r"""Return a callable for the list column specs method over gRPC. - - Lists column specs in a table spec. - - Returns: - Callable[[~.ListColumnSpecsRequest], - Awaitable[~.ListColumnSpecsResponse]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_column_specs' not in self._stubs: - self._stubs['list_column_specs'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ListColumnSpecs', - request_serializer=service.ListColumnSpecsRequest.serialize, - response_deserializer=service.ListColumnSpecsResponse.deserialize, - ) - return self._stubs['list_column_specs'] - - @property - def update_column_spec(self) -> Callable[ - [service.UpdateColumnSpecRequest], - Awaitable[gca_column_spec.ColumnSpec]]: - r"""Return a callable for the update column spec method over gRPC. - - Updates a column spec. - - Returns: - Callable[[~.UpdateColumnSpecRequest], - Awaitable[~.ColumnSpec]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_column_spec' not in self._stubs: - self._stubs['update_column_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/UpdateColumnSpec', - request_serializer=service.UpdateColumnSpecRequest.serialize, - response_deserializer=gca_column_spec.ColumnSpec.deserialize, - ) - return self._stubs['update_column_spec'] - - @property - def create_model(self) -> Callable[ - [service.CreateModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create model method over gRPC. - - Creates a model. Returns a Model in the - [response][google.longrunning.Operation.response] field when it - completes. When you create a model, several model evaluations - are created for it: a global evaluation, and one evaluation for - each annotation spec. - - Returns: - Callable[[~.CreateModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_model' not in self._stubs: - self._stubs['create_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/CreateModel', - request_serializer=service.CreateModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_model'] - - @property - def get_model(self) -> Callable[ - [service.GetModelRequest], - Awaitable[model.Model]]: - r"""Return a callable for the get model method over gRPC. - - Gets a model. - - Returns: - Callable[[~.GetModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetModel', - request_serializer=service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [service.ListModelsRequest], - Awaitable[service.ListModelsResponse]]: - r"""Return a callable for the list models method over gRPC. - - Lists models. - - Returns: - Callable[[~.ListModelsRequest], - Awaitable[~.ListModelsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ListModels', - request_serializer=service.ListModelsRequest.serialize, - response_deserializer=service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def delete_model(self) -> Callable[ - [service.DeleteModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete model method over gRPC. - - Deletes a model. Returns ``google.protobuf.Empty`` in the - [response][google.longrunning.Operation.response] field when it - completes, and ``delete_details`` in the - [metadata][google.longrunning.Operation.metadata] field. - - Returns: - Callable[[~.DeleteModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model' not in self._stubs: - self._stubs['delete_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/DeleteModel', - request_serializer=service.DeleteModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model'] - - @property - def deploy_model(self) -> Callable[ - [service.DeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a model. If a model is already deployed, deploying it - with the same parameters has no effect. Deploying with different - parameters (e.g. changing - - [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number]) - will reset the deployment state without pausing the model's - availability. - - Only applicable for Text Classification, Image Object Detection, - Tables, and Image Segmentation; all other domains manage - deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.DeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
- if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/DeployModel', - request_serializer=service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [service.UndeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a model. If the model is not deployed this method has - no effect. - - Only applicable for Text Classification, Image Object Detection - and Tables; all other domains manage deployment automatically. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.UndeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/UndeployModel', - request_serializer=service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - @property - def export_model(self) -> Callable[ - [service.ExportModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export model method over gRPC. - - Exports a trained, "export-able", model to a user specified - Google Cloud Storage location. A model is considered export-able - if and only if it has an export format defined for it in - - [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. - - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.ExportModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_model' not in self._stubs: - self._stubs['export_model'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ExportModel', - request_serializer=service.ExportModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_model'] - - @property - def export_evaluated_examples(self) -> Callable[ - [service.ExportEvaluatedExamplesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export evaluated examples method over gRPC. - - Exports examples on which the model was evaluated (i.e. which - were in the TEST set of the dataset the model was created from), - together with their ground truth annotations and the annotations - created (predicted) by the model. The examples, ground truth and - predictions are exported in the state they were at the moment - the model was evaluated. - - This export is available only for 30 days since the model - evaluation is created. - - Currently only available for Tables. 
- - Returns an empty response in the - [response][google.longrunning.Operation.response] field when it - completes. - - Returns: - Callable[[~.ExportEvaluatedExamplesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_evaluated_examples' not in self._stubs: - self._stubs['export_evaluated_examples'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ExportEvaluatedExamples', - request_serializer=service.ExportEvaluatedExamplesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_evaluated_examples'] - - @property - def get_model_evaluation(self) -> Callable[ - [service.GetModelEvaluationRequest], - Awaitable[model_evaluation.ModelEvaluation]]: - r"""Return a callable for the get model evaluation method over gRPC. - - Gets a model evaluation. - - Returns: - Callable[[~.GetModelEvaluationRequest], - Awaitable[~.ModelEvaluation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/GetModelEvaluation', - request_serializer=service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [service.ListModelEvaluationsRequest], - Awaitable[service.ListModelEvaluationsResponse]]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists model evaluations. - - Returns: - Callable[[~.ListModelEvaluationsRequest], - Awaitable[~.ListModelEvaluationsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.AutoMl/ListModelEvaluations', - request_serializer=service.ListModelEvaluationsRequest.serialize, - response_deserializer=service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'AutoMlGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py deleted file mode 100644 index c1876de4..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/auto_ml/transports/rest.py +++ /dev/null @@ -1,3091 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.api_core import exceptions as core_exceptions -from google.api_core import retry as retries -from google.api_core import rest_helpers -from google.api_core import rest_streaming -from google.api_core import path_template -from google.api_core import gapic_v1 - -from google.protobuf import json_format -from google.api_core import operations_v1 -from requests import __version__ as requests_version -import dataclasses -import re -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -import warnings - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - - -from google.cloud.automl_v1beta1.types import annotation_spec -from google.cloud.automl_v1beta1.types import column_spec -from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec -from google.cloud.automl_v1beta1.types import dataset -from google.cloud.automl_v1beta1.types import dataset as gca_dataset -from google.cloud.automl_v1beta1.types import model -from google.cloud.automl_v1beta1.types import model_evaluation -from google.cloud.automl_v1beta1.types import service -from google.cloud.automl_v1beta1.types import table_spec -from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec -from google.longrunning import operations_pb2 # type: ignore - -from .base import AutoMlTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, - grpc_version=None, - rest_version=requests_version, -) - - -class AutoMlRestInterceptor: - """Interceptor for AutoMl. - - Interceptors are used to manipulate requests, request metadata, and responses - in arbitrary ways. - Example use cases include: - * Logging - * Verifying requests according to service or custom semantics - * Stripping extraneous information from responses - - These use cases and more can be enabled by injecting an - instance of a custom subclass when constructing the AutoMlRestTransport. - - .. 
code-block:: python - class MyCustomAutoMlInterceptor(AutoMlRestInterceptor): - def pre_create_dataset(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_dataset(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_delete_dataset(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_delete_dataset(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_delete_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_delete_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_deploy_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_deploy_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_export_data(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_export_data(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_export_evaluated_examples(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_export_evaluated_examples(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_export_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_export_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_annotation_spec(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_annotation_spec(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_column_spec(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_column_spec(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_dataset(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_dataset(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_model_evaluation(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_model_evaluation(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_table_spec(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_table_spec(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_import_data(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def 
post_import_data(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_column_specs(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_column_specs(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_datasets(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_datasets(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_model_evaluations(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_model_evaluations(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_models(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_models(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_table_specs(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_table_specs(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_undeploy_model(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_undeploy_model(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_column_spec(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_column_spec(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_dataset(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_dataset(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_table_spec(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_table_spec(self, response): - logging.log(f"Received response: {response}") - return response - - transport = AutoMlRestTransport(interceptor=MyCustomAutoMlInterceptor()) - client = AutoMlClient(transport=transport) - - - """ - def pre_create_dataset(self, request: service.CreateDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.CreateDatasetRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for create_dataset - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_create_dataset(self, response: gca_dataset.Dataset) -> gca_dataset.Dataset: - """Post-rpc interceptor for create_dataset - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_create_model(self, request: service.CreateModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.CreateModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for create_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. 
- """ - return request, metadata - - def post_create_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for create_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_delete_dataset(self, request: service.DeleteDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeleteDatasetRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for delete_dataset - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_delete_dataset(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for delete_dataset - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_delete_model(self, request: service.DeleteModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeleteModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for delete_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_delete_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for delete_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_deploy_model(self, request: service.DeployModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.DeployModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for deploy_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_deploy_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for deploy_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_export_data(self, request: service.ExportDataRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ExportDataRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for export_data - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_export_data(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for export_data - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_export_evaluated_examples(self, request: service.ExportEvaluatedExamplesRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ExportEvaluatedExamplesRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for export_evaluated_examples - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. 
- """ - return request, metadata - - def post_export_evaluated_examples(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for export_evaluated_examples - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_export_model(self, request: service.ExportModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ExportModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for export_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_export_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for export_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_get_annotation_spec(self, request: service.GetAnnotationSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetAnnotationSpecRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_annotation_spec - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_get_annotation_spec(self, response: annotation_spec.AnnotationSpec) -> annotation_spec.AnnotationSpec: - """Post-rpc interceptor for get_annotation_spec - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_get_column_spec(self, request: service.GetColumnSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetColumnSpecRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_column_spec - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_get_column_spec(self, response: column_spec.ColumnSpec) -> column_spec.ColumnSpec: - """Post-rpc interceptor for get_column_spec - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_get_dataset(self, request: service.GetDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetDatasetRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_dataset - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_get_dataset(self, response: dataset.Dataset) -> dataset.Dataset: - """Post-rpc interceptor for get_dataset - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_get_model(self, request: service.GetModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. 
- """ - return request, metadata - - def post_get_model(self, response: model.Model) -> model.Model: - """Post-rpc interceptor for get_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_get_model_evaluation(self, request: service.GetModelEvaluationRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetModelEvaluationRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_model_evaluation - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_get_model_evaluation(self, response: model_evaluation.ModelEvaluation) -> model_evaluation.ModelEvaluation: - """Post-rpc interceptor for get_model_evaluation - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_get_table_spec(self, request: service.GetTableSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.GetTableSpecRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_table_spec - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_get_table_spec(self, response: table_spec.TableSpec) -> table_spec.TableSpec: - """Post-rpc interceptor for get_table_spec - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_import_data(self, request: service.ImportDataRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ImportDataRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for import_data - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_import_data(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for import_data - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_list_column_specs(self, request: service.ListColumnSpecsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListColumnSpecsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_column_specs - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_list_column_specs(self, response: service.ListColumnSpecsResponse) -> service.ListColumnSpecsResponse: - """Post-rpc interceptor for list_column_specs - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_list_datasets(self, request: service.ListDatasetsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListDatasetsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_datasets - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. 
- """ - return request, metadata - - def post_list_datasets(self, response: service.ListDatasetsResponse) -> service.ListDatasetsResponse: - """Post-rpc interceptor for list_datasets - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_list_model_evaluations(self, request: service.ListModelEvaluationsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListModelEvaluationsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_model_evaluations - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_list_model_evaluations(self, response: service.ListModelEvaluationsResponse) -> service.ListModelEvaluationsResponse: - """Post-rpc interceptor for list_model_evaluations - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_list_models(self, request: service.ListModelsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListModelsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_models - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_list_models(self, response: service.ListModelsResponse) -> service.ListModelsResponse: - """Post-rpc interceptor for list_models - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_list_table_specs(self, request: service.ListTableSpecsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.ListTableSpecsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_table_specs - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_list_table_specs(self, response: service.ListTableSpecsResponse) -> service.ListTableSpecsResponse: - """Post-rpc interceptor for list_table_specs - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_undeploy_model(self, request: service.UndeployModelRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UndeployModelRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for undeploy_model - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_undeploy_model(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for undeploy_model - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_update_column_spec(self, request: service.UpdateColumnSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UpdateColumnSpecRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for update_column_spec - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. 
- """ - return request, metadata - - def post_update_column_spec(self, response: gca_column_spec.ColumnSpec) -> gca_column_spec.ColumnSpec: - """Post-rpc interceptor for update_column_spec - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_update_dataset(self, request: service.UpdateDatasetRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UpdateDatasetRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for update_dataset - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_update_dataset(self, response: gca_dataset.Dataset) -> gca_dataset.Dataset: - """Post-rpc interceptor for update_dataset - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - def pre_update_table_spec(self, request: service.UpdateTableSpecRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[service.UpdateTableSpecRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for update_table_spec - - Override in a subclass to manipulate the request or metadata - before they are sent to the AutoMl server. - """ - return request, metadata - - def post_update_table_spec(self, response: gca_table_spec.TableSpec) -> gca_table_spec.TableSpec: - """Post-rpc interceptor for update_table_spec - - Override in a subclass to manipulate the response - after it is returned by the AutoMl server but before - it is returned to user code. - """ - return response - - -@dataclasses.dataclass -class AutoMlRestStub: - _session: AuthorizedSession - _host: str - _interceptor: AutoMlRestInterceptor - - -class AutoMlRestTransport(AutoMlTransport): - """REST backend transport for AutoMl. - - AutoML Server API. - - The resource names are assigned by the server. The server never - reuses names that it has created after the resources with those - names are deleted. - - An ID of a resource is the last element of the item's resource name. - For - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, - then the id for the item is ``{dataset_id}``. - - Currently the only supported ``location_id`` is "us-central1". - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends JSON representations of protocol buffers over HTTP/1.1 - - """ - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[ - ], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = 'https', - interceptor: Optional[AutoMlRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. - # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) - if maybe_url_match is None: - raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST) - self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or AutoMlRestInterceptor() - self._prep_wrapped_messages(client_info) - - @property - def operations_client(self) -> operations_v1.AbstractOperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Only create a new client if we do not already have one.
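The pre_*/post_* hook methods defined on AutoMlRestInterceptor above, together with the ``interceptor`` argument accepted by this constructor, are the supported way to observe or adjust traffic without editing the generated stubs. A minimal sketch of wiring them together (module paths are assumed from the staging layout in this patch; ``LoggingInterceptor`` is a hypothetical name):

from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.services.auto_ml.transports.rest import (
    AutoMlRestInterceptor,
    AutoMlRestTransport,
)

class LoggingInterceptor(AutoMlRestInterceptor):
    def pre_get_dataset(self, request, metadata):
        # Runs before the request reaches the AutoMl server.
        print("GetDataset request:", request.name)
        return request, metadata

    def post_get_dataset(self, response):
        # Runs after the server responds, before user code sees the Dataset.
        print("Fetched dataset:", response.display_name)
        return response

# With no explicit credentials the transport falls back to application
# default credentials; the host defaults to automl.googleapis.com.
transport = AutoMlRestTransport(interceptor=LoggingInterceptor())
client = automl_v1beta1.AutoMlClient(transport=transport)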
- if self._operations_client is None: - http_options: Dict[str, List[Dict[str, str]]] = { - 'google.longrunning.Operations.CancelOperation': [ - { - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}:cancel', - 'body': '*', - }, - ], - 'google.longrunning.Operations.DeleteOperation': [ - { - 'method': 'delete', - 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}', - }, - ], - 'google.longrunning.Operations.GetOperation': [ - { - 'method': 'get', - 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}', - }, - ], - 'google.longrunning.Operations.ListOperations': [ - { - 'method': 'get', - 'uri': '/v1beta1/{name=projects/*/locations/*}/operations', - }, - ], - 'google.longrunning.Operations.WaitOperation': [ - { - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}:wait', - 'body': '*', - }, - ], - } - - rest_transport = operations_v1.OperationsRestTransport( - host=self._host, - # use the credentials which are saved - credentials=self._credentials, - scopes=self._scopes, - http_options=http_options, - path_prefix="v1beta1") - - self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) - - # Return the client from cache. - return self._operations_client - - class _CreateDataset(AutoMlRestStub): - def __hash__(self): - return hash("CreateDataset") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.CreateDatasetRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> gca_dataset.Dataset: - r"""Call the create dataset method over HTTP. - - Args: - request (~.service.CreateDatasetRequest): - The request object. Request message for - [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.gca_dataset.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1beta1/{parent=projects/*/locations/*}/datasets', - 'body': 'dataset', - }, - ] - request, metadata = self._interceptor.pre_create_dataset(request, metadata) - pb_request = service.CreateDatasetRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = gca_dataset.Dataset() - pb_resp = gca_dataset.Dataset.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_dataset(resp) - return resp - - class _CreateModel(AutoMlRestStub): - def __hash__(self): - return hash("CreateModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.CreateModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the create model method over HTTP. - - Args: - request (~.service.CreateModelRequest): - The request object. Request message for - [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1beta1/{parent=projects/*/locations/*}/models', - 'body': 'model', - }, - ] - request, metadata = self._interceptor.pre_create_model(request, metadata) - pb_request = service.CreateModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_model(resp) - return resp - - class _DeleteDataset(AutoMlRestStub): - def __hash__(self): - return hash("DeleteDataset") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.DeleteDatasetRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the delete dataset method over HTTP. - - Args: - request (~.service.DeleteDatasetRequest): - The request object. Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'delete', - 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*}', - }, - ] - request, metadata = self._interceptor.pre_delete_dataset(request, metadata) - pb_request = service.DeleteDatasetRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_delete_dataset(resp) - return resp - - class _DeleteModel(AutoMlRestStub): - def __hash__(self): - return hash("DeleteModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.DeleteModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the delete model method over HTTP. - - Args: - request (~.service.DeleteModelRequest): - The request object. Request message for - [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'delete', - 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}', - }, - ] - request, metadata = self._interceptor.pre_delete_model(request, metadata) - pb_request = service.DeleteModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_delete_model(resp) - return resp - - class _DeployModel(AutoMlRestStub): - def __hash__(self): - return hash("DeployModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.DeployModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the deploy model method over HTTP. - - Args: - request (~.service.DeployModelRequest): - The request object. Request message for - [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:deploy', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_deploy_model(request, metadata) - pb_request = service.DeployModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_deploy_model(resp) - return resp - - class _ExportData(AutoMlRestStub): - def __hash__(self): - return hash("ExportData") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ExportDataRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the export data method over HTTP. - - Args: - request (~.service.ExportDataRequest): - The request object. Request message for - [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*}:exportData', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_export_data(request, metadata) - pb_request = service.ExportDataRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_export_data(resp) - return resp - - class _ExportEvaluatedExamples(AutoMlRestStub): - def __hash__(self): - return hash("ExportEvaluatedExamples") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ExportEvaluatedExamplesRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the export evaluated examples method over HTTP. - - Args: - request (~.service.ExportEvaluatedExamplesRequest): - The request object. Request message for - [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_export_evaluated_examples(request, metadata) - pb_request = service.ExportEvaluatedExamplesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_export_evaluated_examples(resp) - return resp - - class _ExportModel(AutoMlRestStub): - def __hash__(self): - return hash("ExportModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ExportModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the export model method over HTTP. - - Args: - request (~.service.ExportModelRequest): - The request object. Request message for - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an - error code will be returned. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:export', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_export_model(request, metadata) - pb_request = service.ExportModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_export_model(resp) - return resp - - class _GetAnnotationSpec(AutoMlRestStub): - def __hash__(self): - return hash("GetAnnotationSpec") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.GetAnnotationSpecRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> annotation_spec.AnnotationSpec: - r"""Call the get annotation spec method over HTTP. - - Args: - request (~.service.GetAnnotationSpecRequest): - The request object. Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.annotation_spec.AnnotationSpec: - A definition of an annotation spec. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}', - }, - ] - request, metadata = self._interceptor.pre_get_annotation_spec(request, metadata) - pb_request = service.GetAnnotationSpecRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = annotation_spec.AnnotationSpec() - pb_resp = annotation_spec.AnnotationSpec.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_annotation_spec(resp) - return resp - - class _GetColumnSpec(AutoMlRestStub): - def __hash__(self): - return hash("GetColumnSpec") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.GetColumnSpecRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> column_spec.ColumnSpec: - r"""Call the get column spec method over HTTP. - - Args: - request (~.service.GetColumnSpecRequest): - The request object. Request message for - [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.column_spec.ColumnSpec: - A representation of a column in a relational table. When - listing them, column specs are returned in the same - order in which they were given on import . 
Used by: - - - Tables - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}', - }, - ] - request, metadata = self._interceptor.pre_get_column_spec(request, metadata) - pb_request = service.GetColumnSpecRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = column_spec.ColumnSpec() - pb_resp = column_spec.ColumnSpec.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_column_spec(resp) - return resp - - class _GetDataset(AutoMlRestStub): - def __hash__(self): - return hash("GetDataset") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.GetDatasetRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> dataset.Dataset: - r"""Call the get dataset method over HTTP. - - Args: - request (~.service.GetDatasetRequest): - The request object. Request message for - [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.dataset.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. 
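Because every stub raises the matching ``core_exceptions.GoogleAPICallError`` subclass on a non-2xx status (via ``from_http_response``), REST failures surface to callers of the public client the same way gRPC ones do. A hedged sketch, reusing ``client`` from the earlier example with a made-up dataset name:

from google.api_core import exceptions as core_exceptions

try:
    dataset = client.get_dataset(
        name="projects/my-project/locations/us-central1/datasets/no-such-dataset"
    )
except core_exceptions.NotFound as exc:
    # A 404 from the REST endpoint is mapped to NotFound by from_http_response().
    print("dataset is missing:", exc.message)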
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*}', - }, - ] - request, metadata = self._interceptor.pre_get_dataset(request, metadata) - pb_request = service.GetDatasetRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = dataset.Dataset() - pb_resp = dataset.Dataset.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_dataset(resp) - return resp - - class _GetModel(AutoMlRestStub): - def __hash__(self): - return hash("GetModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.GetModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> model.Model: - r"""Call the get model method over HTTP. - - Args: - request (~.service.GetModelRequest): - The request object. Request message for - [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.model.Model: - API proto representing a trained - machine learning model. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}', - }, - ] - request, metadata = self._interceptor.pre_get_model(request, metadata) - pb_request = service.GetModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = model.Model() - pb_resp = model.Model.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_model(resp) - return resp - - class _GetModelEvaluation(AutoMlRestStub): - def __hash__(self): - return hash("GetModelEvaluation") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.GetModelEvaluationRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> model_evaluation.ModelEvaluation: - r"""Call the get model evaluation method over HTTP. - - Args: - request (~.service.GetModelEvaluationRequest): - The request object. Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.model_evaluation.ModelEvaluation: - Evaluation results of a model. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}', - }, - ] - request, metadata = self._interceptor.pre_get_model_evaluation(request, metadata) - pb_request = service.GetModelEvaluationRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = model_evaluation.ModelEvaluation() - pb_resp = model_evaluation.ModelEvaluation.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_model_evaluation(resp) - return resp - - class _GetTableSpec(AutoMlRestStub): - def __hash__(self): - return hash("GetTableSpec") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.GetTableSpecRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> table_spec.TableSpec: - r"""Call the get table spec method over HTTP. - - Args: - request (~.service.GetTableSpecRequest): - The request object. Request message for - [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.table_spec.TableSpec: - A specification of a relational table. The table's - schema is represented via its child column specs. It is - pre-populated as part of ImportData by schema inference - algorithm, the version of which is a required parameter - of ImportData InputConfig. Note: While working with a - table, at times the schema may be inconsistent with the - data in the table (e.g. string in a FLOAT64 column). The - consistency validation is done upon creation of a model. 
- Used by: - - - Tables - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}', - }, - ] - request, metadata = self._interceptor.pre_get_table_spec(request, metadata) - pb_request = service.GetTableSpecRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table_spec.TableSpec() - pb_resp = table_spec.TableSpec.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_table_spec(resp) - return resp - - class _ImportData(AutoMlRestStub): - def __hash__(self): - return hash("ImportData") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ImportDataRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the import data method over HTTP. - - Args: - request (~.service.ImportDataRequest): - The request object. Request message for - [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/datasets/*}:importData', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_import_data(request, metadata) - pb_request = service.ImportDataRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_import_data(resp) - return resp - - class _ListColumnSpecs(AutoMlRestStub): - def __hash__(self): - return hash("ListColumnSpecs") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ListColumnSpecsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> service.ListColumnSpecsResponse: - r"""Call the list column specs method over HTTP. - - Args: - request (~.service.ListColumnSpecsRequest): - The request object. Request message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.service.ListColumnSpecsResponse: - Response message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs', - }, - ] - request, metadata = self._interceptor.pre_list_column_specs(request, metadata) - pb_request = service.ListColumnSpecsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = service.ListColumnSpecsResponse() - pb_resp = service.ListColumnSpecsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_column_specs(resp) - return resp - - class _ListDatasets(AutoMlRestStub): - def __hash__(self): - return hash("ListDatasets") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ListDatasetsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> service.ListDatasetsResponse: - r"""Call the list datasets method over HTTP. - - Args: - request (~.service.ListDatasetsRequest): - The request object. Request message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.service.ListDatasetsResponse: - Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{parent=projects/*/locations/*}/datasets', - }, - ] - request, metadata = self._interceptor.pre_list_datasets(request, metadata) - pb_request = service.ListDatasetsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = service.ListDatasetsResponse() - pb_resp = service.ListDatasetsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_datasets(resp) - return resp - - class _ListModelEvaluations(AutoMlRestStub): - def __hash__(self): - return hash("ListModelEvaluations") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ListModelEvaluationsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> service.ListModelEvaluationsResponse: - r"""Call the list model evaluations method over HTTP. - - Args: - request (~.service.ListModelEvaluationsRequest): - The request object. Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.service.ListModelEvaluationsResponse: - Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations', - }, - ] - request, metadata = self._interceptor.pre_list_model_evaluations(request, metadata) - pb_request = service.ListModelEvaluationsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = service.ListModelEvaluationsResponse() - pb_resp = service.ListModelEvaluationsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_model_evaluations(resp) - return resp - - class _ListModels(AutoMlRestStub): - def __hash__(self): - return hash("ListModels") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ListModelsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> service.ListModelsResponse: - r"""Call the list models method over HTTP. - - Args: - request (~.service.ListModelsRequest): - The request object. Request message for - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.service.ListModelsResponse: - Response message for - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{parent=projects/*/locations/*}/models', - }, - ] - request, metadata = self._interceptor.pre_list_models(request, metadata) - pb_request = service.ListModelsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = service.ListModelsResponse() - pb_resp = service.ListModelsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_models(resp) - return resp - - class _ListTableSpecs(AutoMlRestStub): - def __hash__(self): - return hash("ListTableSpecs") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.ListTableSpecsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> service.ListTableSpecsResponse: - r"""Call the list table specs method over HTTP. - - Args: - request (~.service.ListTableSpecsRequest): - The request object. Request message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.service.ListTableSpecsResponse: - Response message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs', - }, - ] - request, metadata = self._interceptor.pre_list_table_specs(request, metadata) - pb_request = service.ListTableSpecsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = service.ListTableSpecsResponse() - pb_resp = service.ListTableSpecsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_table_specs(resp) - return resp - - class _UndeployModel(AutoMlRestStub): - def __hash__(self): - return hash("UndeployModel") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.UndeployModelRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the undeploy model method over HTTP. - - Args: - request (~.service.UndeployModelRequest): - The request object. Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:undeploy', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_undeploy_model(request, metadata) - pb_request = service.UndeployModelRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_undeploy_model(resp) - return resp - - class _UpdateColumnSpec(AutoMlRestStub): - def __hash__(self): - return hash("UpdateColumnSpec") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.UpdateColumnSpecRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> gca_column_spec.ColumnSpec: - r"""Call the update column spec method over HTTP. - - Args: - request (~.service.UpdateColumnSpecRequest): - The request object. Request message for - [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.gca_column_spec.ColumnSpec: - A representation of a column in a relational table. When - listing them, column specs are returned in the same - order in which they were given on import . 
Used by: - - - Tables - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'patch', - 'uri': '/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}', - 'body': 'column_spec', - }, - ] - request, metadata = self._interceptor.pre_update_column_spec(request, metadata) - pb_request = service.UpdateColumnSpecRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = gca_column_spec.ColumnSpec() - pb_resp = gca_column_spec.ColumnSpec.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_column_spec(resp) - return resp - - class _UpdateDataset(AutoMlRestStub): - def __hash__(self): - return hash("UpdateDataset") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.UpdateDatasetRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> gca_dataset.Dataset: - r"""Call the update dataset method over HTTP. - - Args: - request (~.service.UpdateDatasetRequest): - The request object. Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.gca_dataset.Dataset: - A workspace for solving a single, - particular machine learning (ML) - problem. A workspace contains examples - that may be annotated. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'patch', - 'uri': '/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}', - 'body': 'dataset', - }, - ] - request, metadata = self._interceptor.pre_update_dataset(request, metadata) - pb_request = service.UpdateDatasetRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = gca_dataset.Dataset() - pb_resp = gca_dataset.Dataset.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_dataset(resp) - return resp - - class _UpdateTableSpec(AutoMlRestStub): - def __hash__(self): - return hash("UpdateTableSpec") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: service.UpdateTableSpecRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> gca_table_spec.TableSpec: - r"""Call the update table spec method over HTTP. - - Args: - request (~.service.UpdateTableSpecRequest): - The request object. Request message for - [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.gca_table_spec.TableSpec: - A specification of a relational table. The table's - schema is represented via its child column specs. It is - pre-populated as part of ImportData by schema inference - algorithm, the version of which is a required parameter - of ImportData InputConfig. Note: While working with a - table, at times the schema may be inconsistent with the - data in the table (e.g. string in a FLOAT64 column). The - consistency validation is done upon creation of a model. 
- Used by: - - - Tables - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'patch', - 'uri': '/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}', - 'body': 'table_spec', - }, - ] - request, metadata = self._interceptor.pre_update_table_spec(request, metadata) - pb_request = service.UpdateTableSpecRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = gca_table_spec.TableSpec() - pb_resp = gca_table_spec.TableSpec.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_table_spec(resp) - return resp - - @property - def create_dataset(self) -> Callable[ - [service.CreateDatasetRequest], - gca_dataset.Dataset]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateDataset(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_model(self) -> Callable[ - [service.CreateModelRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_dataset(self) -> Callable[ - [service.DeleteDatasetRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteDataset(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_model(self) -> Callable[ - [service.DeleteModelRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def deploy_model(self) -> Callable[ - [service.DeployModelRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._DeployModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def export_data(self) -> Callable[ - [service.ExportDataRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ExportData(self._session, self._host, self._interceptor) # type: ignore - - @property - def export_evaluated_examples(self) -> Callable[ - [service.ExportEvaluatedExamplesRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ExportEvaluatedExamples(self._session, self._host, self._interceptor) # type: ignore - - @property - def export_model(self) -> Callable[ - [service.ExportModelRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ExportModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_annotation_spec(self) -> Callable[ - [service.GetAnnotationSpecRequest], - annotation_spec.AnnotationSpec]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetAnnotationSpec(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_column_spec(self) -> Callable[ - [service.GetColumnSpecRequest], - column_spec.ColumnSpec]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetColumnSpec(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_dataset(self) -> Callable[ - [service.GetDatasetRequest], - dataset.Dataset]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetDataset(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_model(self) -> Callable[ - [service.GetModelRequest], - model.Model]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_model_evaluation(self) -> Callable[ - [service.GetModelEvaluationRequest], - model_evaluation.ModelEvaluation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetModelEvaluation(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_table_spec(self) -> Callable[ - [service.GetTableSpecRequest], - table_spec.TableSpec]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetTableSpec(self._session, self._host, self._interceptor) # type: ignore - - @property - def import_data(self) -> Callable[ - [service.ImportDataRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._ImportData(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_column_specs(self) -> Callable[ - [service.ListColumnSpecsRequest], - service.ListColumnSpecsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListColumnSpecs(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_datasets(self) -> Callable[ - [service.ListDatasetsRequest], - service.ListDatasetsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListDatasets(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_model_evaluations(self) -> Callable[ - [service.ListModelEvaluationsRequest], - service.ListModelEvaluationsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListModelEvaluations(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_models(self) -> Callable[ - [service.ListModelsRequest], - service.ListModelsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListModels(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_table_specs(self) -> Callable[ - [service.ListTableSpecsRequest], - service.ListTableSpecsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListTableSpecs(self._session, self._host, self._interceptor) # type: ignore - - @property - def undeploy_model(self) -> Callable[ - [service.UndeployModelRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UndeployModel(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_column_spec(self) -> Callable[ - [service.UpdateColumnSpecRequest], - gca_column_spec.ColumnSpec]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateColumnSpec(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_dataset(self) -> Callable[ - [service.UpdateDatasetRequest], - gca_dataset.Dataset]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateDataset(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_table_spec(self) -> Callable[ - [service.UpdateTableSpecRequest], - gca_table_spec.TableSpec]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._UpdateTableSpec(self._session, self._host, self._interceptor) # type: ignore - - @property - def kind(self) -> str: - return "rest" - - def close(self): - self._session.close() - - -__all__=( - 'AutoMlRestTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/__init__.py deleted file mode 100644 index 905b8c43..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import PredictionServiceClient -from .async_client import PredictionServiceAsyncClient - -__all__ = ( - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/async_client.py deleted file mode 100644 index 6144f9b3..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/async_client.py +++ /dev/null @@ -1,621 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
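The REST stubs deleted above all share one flow: transcode the request against its http_options rule, JSON-encode the query params, issue the HTTP call with the shared session, and parse the body back into a response message. A minimal, standalone sketch of just the transcoding step for the ListDatasets rule, assuming google-api-core and google-cloud-automl are installed; the project and location values are placeholders, not anything taken from this patch:

.. code-block:: python

    # Standalone sketch (not the generated transport itself): how an
    # http_options rule is expanded by google.api_core.path_template.transcode.
    from google.api_core import path_template
    from google.cloud.automl_v1beta1.types import service

    http_options = [{
        'method': 'get',
        'uri': '/v1beta1/{parent=projects/*/locations/*}/datasets',
    }]
    request = service.ListDatasetsRequest(
        parent="projects/example-project/locations/us-central1",
    )
    transcoded = path_template.transcode(
        http_options, service.ListDatasetsRequest.pb(request))

    # Prints: get /v1beta1/projects/example-project/locations/us-central1/datasets
    print(transcoded['method'], transcoded['uri'])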
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union - -from google.cloud.automl_v1beta1 import gapic_version as package_version - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.automl_v1beta1.types import annotation_payload -from google.cloud.automl_v1beta1.types import data_items -from google.cloud.automl_v1beta1.types import io -from google.cloud.automl_v1beta1.types import operations -from google.cloud.automl_v1beta1.types import prediction_service -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport -from .client import PredictionServiceClient - - -class PredictionServiceAsyncClient: - """AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - _client: PredictionServiceClient - - DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT - - model_path = staticmethod(PredictionServiceClient.model_path) - parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) - common_billing_account_path = staticmethod(PredictionServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) - common_project_path = staticmethod(PredictionServiceClient.common_project_path) - parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) - common_location_path = staticmethod(PredictionServiceClient.common_location_path) - parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. - """ - return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. 
- - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. - """ - return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - return PredictionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore - - @property - def transport(self) -> PredictionServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PredictionServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", - client_options: Optional[ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = PredictionServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def predict(self, - request: Optional[Union[prediction_service.PredictRequest, dict]] = None, - *, - name: Optional[str] = None, - payload: Optional[data_items.ExamplePayload] = None, - params: Optional[MutableMapping[str, str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. The prediction result will be - directly returned in the response. Available for following ML - problems, and their expected request payloads: - - - Image Classification - Image in .JPEG, .GIF or .PNG format, - image_bytes up to 30MB. - - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image_bytes up to 30MB. - - Text Classification - TextSnippet, content up to 60,000 - characters, UTF-8 encoded. - - Text Extraction - TextSnippet, content up to 30,000 - characters, UTF-8 NFC encoded. - - Translation - TextSnippet, content up to 25,000 characters, - UTF-8 encoded. - - Tables - Row, with column values matching the columns of the - model, up to 5MB. Not available for FORECASTING - - [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]. - - - Text Sentiment - TextSnippet, content up 500 characters, - UTF-8 encoded. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_predict(): - # Create a client - client = automl_v1beta1.PredictionServiceAsyncClient() - - # Initialize request argument(s) - payload = automl_v1beta1.ExamplePayload() - payload.image.image_bytes = b'image_bytes_blob' - - request = automl_v1beta1.PredictRequest( - name="name_value", - payload=payload, - ) - - # Make the request - response = await client.predict(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.PredictRequest, dict]]): - The request object. Request message for - [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. 
- name (:class:`str`): - Required. Name of the model requested - to serve the prediction. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - payload (:class:`google.cloud.automl_v1beta1.types.ExamplePayload`): - Required. Payload to perform a - prediction on. The payload must match - the problem type that the model was - trained to solve. - - This corresponds to the ``payload`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - params (:class:`MutableMapping[str, str]`): - Additional domain-specific parameters, any string must - be up to 25000 characters long. - - - For Image Classification: - - ``score_threshold`` - (float) A value from 0.0 to - 1.0. When the model makes predictions for an image, - it will only produce results that have at least this - confidence score. The default is 0.5. - - - For Image Object Detection: ``score_threshold`` - - (float) When Model detects objects on the image, it - will only produce bounding boxes which have at least - this confidence score. Value in 0 to 1 range, default - is 0.5. ``max_bounding_box_count`` - (int64) No more - than this number of bounding boxes will be returned - in the response. Default is 100, the requested value - may be limited by server. - - - For Tables: feature_importance - (boolean) Whether - feature importance should be populated in the - returned TablesAnnotation. The default is false. - - This corresponds to the ``params`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, payload, params]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.PredictRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if payload is not None: - request.payload = payload - - if params: - request.params.update(params) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.predict, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def batch_predict(self, - request: Optional[Union[prediction_service.BatchPredictRequest, dict]] = None, - *, - name: Optional[str] = None, - input_config: Optional[io.BatchPredictInputConfig] = None, - output_config: Optional[io.BatchPredictOutputConfig] = None, - params: Optional[MutableMapping[str, str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Perform a batch prediction. Unlike the online - [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], - batch prediction result won't be immediately available in the - response. Instead, a long running operation object is returned. - User can poll the operation result via - [GetOperation][google.longrunning.Operations.GetOperation] - method. Once the operation is done, - [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] - is returned in the - [response][google.longrunning.Operation.response] field. - Available for following ML problems: - - - Image Classification - - Image Object Detection - - Video Classification - - Video Object Tracking \* Text Extraction - - Tables - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - async def sample_batch_predict(): - # Create a client - client = automl_v1beta1.PredictionServiceAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.BatchPredictRequest( - name="name_value", - ) - - # Make the request - operation = client.batch_predict(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.automl_v1beta1.types.BatchPredictRequest, dict]]): - The request object. Request message for - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. - name (:class:`str`): - Required. Name of the model requested - to serve the batch prediction. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - input_config (:class:`google.cloud.automl_v1beta1.types.BatchPredictInputConfig`): - Required. The input configuration for - batch prediction. - - This corresponds to the ``input_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (:class:`google.cloud.automl_v1beta1.types.BatchPredictOutputConfig`): - Required. The Configuration - specifying where output predictions - should be written. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - params (:class:`MutableMapping[str, str]`): - Required. Additional domain-specific parameters for the - predictions, any string must be up to 25000 characters - long. - - - For Text Classification: - - ``score_threshold`` - (float) A value from 0.0 to - 1.0. 
When the model makes predictions for a text - snippet, it will only produce results that have at - least this confidence score. The default is 0.5. - - - For Image Classification: - - ``score_threshold`` - (float) A value from 0.0 to - 1.0. When the model makes predictions for an image, - it will only produce results that have at least this - confidence score. The default is 0.5. - - - For Image Object Detection: - - ``score_threshold`` - (float) When Model detects - objects on the image, it will only produce bounding - boxes which have at least this confidence score. - Value in 0 to 1 range, default is 0.5. - ``max_bounding_box_count`` - (int64) No more than - this number of bounding boxes will be produced per - image. Default is 100, the requested value may be - limited by server. - - - For Video Classification : - - ``score_threshold`` - (float) A value from 0.0 to - 1.0. When the model makes predictions for a video, it - will only produce results that have at least this - confidence score. The default is 0.5. - ``segment_classification`` - (boolean) Set to true to - request segment-level classification. AutoML Video - Intelligence returns labels and their confidence - scores for the entire segment of the video that user - specified in the request configuration. The default - is "true". ``shot_classification`` - (boolean) Set to - true to request shot-level classification. AutoML - Video Intelligence determines the boundaries for each - camera shot in the entire segment of the video that - user specified in the request configuration. AutoML - Video Intelligence then returns labels and their - confidence scores for each detected shot, along with - the start and end time of the shot. WARNING: Model - evaluation is not done for this classification type, - the quality of it depends on training data, but there - are no metrics provided to describe that quality. The - default is "false". ``1s_interval_classification`` - - (boolean) Set to true to request classification for a - video at one-second intervals. AutoML Video - Intelligence returns labels and their confidence - scores for each second of the entire segment of the - video that user specified in the request - configuration. WARNING: Model evaluation is not done - for this classification type, the quality of it - depends on training data, but there are no metrics - provided to describe that quality. The default is - "false". - - - For Tables: - - feature_importance - (boolean) Whether feature - importance should be populated in the returned - TablesAnnotations. The default is false. - - - For Video Object Tracking: - - ``score_threshold`` - (float) When Model detects - objects on video frames, it will only produce - bounding boxes which have at least this confidence - score. Value in 0 to 1 range, default is 0.5. - ``max_bounding_box_count`` - (int64) No more than - this number of bounding boxes will be returned per - frame. Default is 100, the requested value may be - limited by server. ``min_bounding_box_size`` - - (float) Only bounding boxes with shortest edge at - least that long as a relative value of video frame - size will be returned. Value in 0 to 1 range. Default - is 0. - - This corresponds to the ``params`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.automl_v1beta1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in - [response][google.longrunning.Operation.response] of - the operation returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, input_config, output_config, params]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.BatchPredictRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if input_config is not None: - request.input_config = input_config - if output_config is not None: - request.output_config = output_config - - if params: - request.params.update(params) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_predict, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - prediction_service.BatchPredictResult, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self) -> "PredictionServiceAsyncClient": - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "PredictionServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/client.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/client.py deleted file mode 100644 index 7894085e..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/client.py +++ /dev/null @@ -1,823 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast - -from google.cloud.automl_v1beta1 import gapic_version as package_version - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.automl_v1beta1.types import annotation_payload -from google.cloud.automl_v1beta1.types import data_items -from google.cloud.automl_v1beta1.types import io -from google.cloud.automl_v1beta1.types import operations -from google.cloud.automl_v1beta1.types import prediction_service -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import PredictionServiceGrpcTransport -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport -from .transports.rest import PredictionServiceRestTransport - - -class PredictionServiceClientMeta(type): - """Metaclass for the PredictionService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] - _transport_registry["grpc"] = PredictionServiceGrpcTransport - _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport - _transport_registry["rest"] = PredictionServiceRestTransport - - def get_transport_class(cls, - label: Optional[str] = None, - ) -> Type[PredictionServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class PredictionServiceClient(metaclass=PredictionServiceClientMeta): - """AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "automl.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PredictionServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PredictionServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. 
- (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - if client_options is None: - client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") - - # Figure out the client cert source to use. - client_cert_source = None - if use_client_cert == "true": - if client_options.client_cert_source: - client_cert_source = client_options.client_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): - api_endpoint = cls.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = cls.DEFAULT_ENDPOINT - - return api_endpoint, client_cert_source - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, PredictionServiceTransport]] = None, - client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) - - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) - - api_key_value = getattr(client_options, "api_key", None) - if api_key_value and credentials: - raise ValueError("client_options.api_key and credentials are mutually exclusive") - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, PredictionServiceTransport): - # transport is a PredictionServiceTransport instance. - if credentials or client_options.credentials_file or api_key_value: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - import google.auth._default # type: ignore - - if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): - credentials = google.auth._default.get_api_key_credentials(api_key_value) - - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - api_audience=client_options.api_audience, - ) - - def predict(self, - request: Optional[Union[prediction_service.PredictRequest, dict]] = None, - *, - name: Optional[str] = None, - payload: Optional[data_items.ExamplePayload] = None, - params: Optional[MutableMapping[str, str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. The prediction result will be - directly returned in the response. Available for following ML - problems, and their expected request payloads: - - - Image Classification - Image in .JPEG, .GIF or .PNG format, - image_bytes up to 30MB. - - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image_bytes up to 30MB. - - Text Classification - TextSnippet, content up to 60,000 - characters, UTF-8 encoded. - - Text Extraction - TextSnippet, content up to 30,000 - characters, UTF-8 NFC encoded. 
- - Translation - TextSnippet, content up to 25,000 characters, - UTF-8 encoded. - - Tables - Row, with column values matching the columns of the - model, up to 5MB. Not available for FORECASTING - - [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]. - - - Text Sentiment - TextSnippet, content up 500 characters, - UTF-8 encoded. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_predict(): - # Create a client - client = automl_v1beta1.PredictionServiceClient() - - # Initialize request argument(s) - payload = automl_v1beta1.ExamplePayload() - payload.image.image_bytes = b'image_bytes_blob' - - request = automl_v1beta1.PredictRequest( - name="name_value", - payload=payload, - ) - - # Make the request - response = client.predict(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.PredictRequest, dict]): - The request object. Request message for - [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. - name (str): - Required. Name of the model requested - to serve the prediction. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - payload (google.cloud.automl_v1beta1.types.ExamplePayload): - Required. Payload to perform a - prediction on. The payload must match - the problem type that the model was - trained to solve. - - This corresponds to the ``payload`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - params (MutableMapping[str, str]): - Additional domain-specific parameters, any string must - be up to 25000 characters long. - - - For Image Classification: - - ``score_threshold`` - (float) A value from 0.0 to - 1.0. When the model makes predictions for an image, - it will only produce results that have at least this - confidence score. The default is 0.5. - - - For Image Object Detection: ``score_threshold`` - - (float) When Model detects objects on the image, it - will only produce bounding boxes which have at least - this confidence score. Value in 0 to 1 range, default - is 0.5. ``max_bounding_box_count`` - (int64) No more - than this number of bounding boxes will be returned - in the response. Default is 100, the requested value - may be limited by server. - - - For Tables: feature_importance - (boolean) Whether - feature importance should be populated in the - returned TablesAnnotation. The default is false. - - This corresponds to the ``params`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.automl_v1beta1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. 
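# A minimal usage sketch of the flattened predict() call documented above; the
# project, location, model ID, and image path below are placeholders, and a
# deployed v1beta1 image classification model is assumed.
from google.cloud import automl_v1beta1

client = automl_v1beta1.PredictionServiceClient()
model_name = client.model_path("my-project", "us-central1", "ICN1234567890")

# Read the image to classify and wrap it in an ExamplePayload.
with open("local_image.jpg", "rb") as image_file:  # placeholder local file
    payload = automl_v1beta1.ExamplePayload(
        image=automl_v1beta1.Image(image_bytes=image_file.read())
    )

# ``params`` values are strings; ``score_threshold`` (documented above for
# image classification) filters out low-confidence results.
response = client.predict(
    name=model_name,
    payload=payload,
    params={"score_threshold": "0.8"},
)
for annotation in response.payload:
    print(annotation.display_name, annotation.classification.score)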
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, payload, params]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.PredictRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.PredictRequest): - request = prediction_service.PredictRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if payload is not None: - request.payload = payload - if params is not None: - request.params = params - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.predict] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def batch_predict(self, - request: Optional[Union[prediction_service.BatchPredictRequest, dict]] = None, - *, - name: Optional[str] = None, - input_config: Optional[io.BatchPredictInputConfig] = None, - output_config: Optional[io.BatchPredictOutputConfig] = None, - params: Optional[MutableMapping[str, str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Perform a batch prediction. Unlike the online - [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], - batch prediction result won't be immediately available in the - response. Instead, a long running operation object is returned. - User can poll the operation result via - [GetOperation][google.longrunning.Operations.GetOperation] - method. Once the operation is done, - [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] - is returned in the - [response][google.longrunning.Operation.response] field. - Available for following ML problems: - - - Image Classification - - Image Object Detection - - Video Classification - - Video Object Tracking \* Text Extraction - - Tables - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import automl_v1beta1 - - def sample_batch_predict(): - # Create a client - client = automl_v1beta1.PredictionServiceClient() - - # Initialize request argument(s) - request = automl_v1beta1.BatchPredictRequest( - name="name_value", - ) - - # Make the request - operation = client.batch_predict(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.automl_v1beta1.types.BatchPredictRequest, dict]): - The request object. Request message for - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. - name (str): - Required. Name of the model requested - to serve the batch prediction. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - input_config (google.cloud.automl_v1beta1.types.BatchPredictInputConfig): - Required. The input configuration for - batch prediction. - - This corresponds to the ``input_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.automl_v1beta1.types.BatchPredictOutputConfig): - Required. The Configuration - specifying where output predictions - should be written. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - params (MutableMapping[str, str]): - Required. Additional domain-specific parameters for the - predictions, any string must be up to 25000 characters - long. - - - For Text Classification: - - ``score_threshold`` - (float) A value from 0.0 to - 1.0. When the model makes predictions for a text - snippet, it will only produce results that have at - least this confidence score. The default is 0.5. - - - For Image Classification: - - ``score_threshold`` - (float) A value from 0.0 to - 1.0. When the model makes predictions for an image, - it will only produce results that have at least this - confidence score. The default is 0.5. - - - For Image Object Detection: - - ``score_threshold`` - (float) When Model detects - objects on the image, it will only produce bounding - boxes which have at least this confidence score. - Value in 0 to 1 range, default is 0.5. - ``max_bounding_box_count`` - (int64) No more than - this number of bounding boxes will be produced per - image. Default is 100, the requested value may be - limited by server. - - - For Video Classification : - - ``score_threshold`` - (float) A value from 0.0 to - 1.0. When the model makes predictions for a video, it - will only produce results that have at least this - confidence score. The default is 0.5. - ``segment_classification`` - (boolean) Set to true to - request segment-level classification. AutoML Video - Intelligence returns labels and their confidence - scores for the entire segment of the video that user - specified in the request configuration. The default - is "true". ``shot_classification`` - (boolean) Set to - true to request shot-level classification. AutoML - Video Intelligence determines the boundaries for each - camera shot in the entire segment of the video that - user specified in the request configuration. 
AutoML - Video Intelligence then returns labels and their - confidence scores for each detected shot, along with - the start and end time of the shot. WARNING: Model - evaluation is not done for this classification type, - the quality of it depends on training data, but there - are no metrics provided to describe that quality. The - default is "false". ``1s_interval_classification`` - - (boolean) Set to true to request classification for a - video at one-second intervals. AutoML Video - Intelligence returns labels and their confidence - scores for each second of the entire segment of the - video that user specified in the request - configuration. WARNING: Model evaluation is not done - for this classification type, the quality of it - depends on training data, but there are no metrics - provided to describe that quality. The default is - "false". - - - For Tables: - - feature_importance - (boolean) Whether feature - importance should be populated in the returned - TablesAnnotations. The default is false. - - - For Video Object Tracking: - - ``score_threshold`` - (float) When Model detects - objects on video frames, it will only produce - bounding boxes which have at least this confidence - score. Value in 0 to 1 range, default is 0.5. - ``max_bounding_box_count`` - (int64) No more than - this number of bounding boxes will be returned per - frame. Default is 100, the requested value may be - limited by server. ``min_bounding_box_size`` - - (float) Only bounding boxes with shortest edge at - least that long as a relative value of video frame - size will be returned. Value in 0 to 1 range. Default - is 0. - - This corresponds to the ``params`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.automl_v1beta1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in - [response][google.longrunning.Operation.response] of - the operation returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, input_config, output_config, params]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.BatchPredictRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.BatchPredictRequest): - request = prediction_service.BatchPredictRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
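# A minimal usage sketch of the flattened batch_predict() call documented
# above; the project, location, model ID, and Cloud Storage URIs below are
# placeholders.
from google.cloud import automl_v1beta1

client = automl_v1beta1.PredictionServiceClient()

# Batch inputs are read from GCS and results are written under a GCS prefix.
input_config = automl_v1beta1.BatchPredictInputConfig(
    gcs_source=automl_v1beta1.GcsSource(input_uris=["gs://my-bucket/batch-inputs.csv"])
)
output_config = automl_v1beta1.BatchPredictOutputConfig(
    gcs_destination=automl_v1beta1.GcsDestination(output_uri_prefix="gs://my-bucket/batch-results/")
)

operation = client.batch_predict(
    name=client.model_path("my-project", "us-central1", "TBL1234567890"),
    input_config=input_config,
    output_config=output_config,
    params={"feature_importance": "true"},  # Tables-only parameter described above
)

# batch_predict returns a long-running operation; result() blocks until it completes.
print(operation.result(timeout=7200))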
- if name is not None: - request.name = name - if input_config is not None: - request.input_config = input_config - if output_config is not None: - request.output_config = output_config - if params is not None: - request.params = params - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_predict] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - prediction_service.BatchPredictResult, - metadata_type=operations.OperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self) -> "PredictionServiceClient": - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - - - - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "PredictionServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py deleted file mode 100644 index d8c81688..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import PredictionServiceTransport -from .grpc import PredictionServiceGrpcTransport -from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport -from .rest import PredictionServiceRestTransport -from .rest import PredictionServiceRestInterceptor - - -# Compile a registry of transports. 
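# A minimal sketch of how the registered transport labels are used in practice,
# assuming only the public client surface shown above: the labels compiled here
# ("grpc", "grpc_asyncio", "rest") are the values accepted by the clients'
# ``transport`` argument, with the synchronous client typically using "grpc"
# (the default) or "rest". Using the client as a context manager closes the
# transport on exit, per the ``__exit__`` warning above.
from google.cloud import automl_v1beta1

with automl_v1beta1.PredictionServiceClient(transport="rest") as client:
    print(type(client.transport).__name__)  # PredictionServiceRestTransport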
-_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] -_transport_registry['grpc'] = PredictionServiceGrpcTransport -_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport -_transport_registry['rest'] = PredictionServiceRestTransport - -__all__ = ( - 'PredictionServiceTransport', - 'PredictionServiceGrpcTransport', - 'PredictionServiceGrpcAsyncIOTransport', - 'PredictionServiceRestTransport', - 'PredictionServiceRestInterceptor', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py deleted file mode 100644 index b166dafa..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py +++ /dev/null @@ -1,169 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union - -from google.cloud.automl_v1beta1 import gapic_version as package_version - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.automl_v1beta1.types import prediction_service -from google.longrunning import operations_pb2 # type: ignore - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -class PredictionServiceTransport(abc.ABC): - """Abstract transport class for PredictionService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'automl.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. 
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - # Don't apply audience if the credentials file passed from user. - if hasattr(credentials, "with_gdch_audience"): - credentials = credentials.with_gdch_audience(api_audience if api_audience else host) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.predict: gapic_v1.method.wrap_method( - self.predict, - default_timeout=60.0, - client_info=client_info, - ), - self.batch_predict: gapic_v1.method.wrap_method( - self.batch_predict, - default_timeout=60.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - Union[ - prediction_service.PredictResponse, - Awaitable[prediction_service.PredictResponse] - ]]: - raise NotImplementedError() - - @property - def batch_predict(self) -> Callable[ - [prediction_service.BatchPredictRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def kind(self) -> str: - raise NotImplementedError() - - -__all__ = ( - 'PredictionServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py deleted file mode 100644 index 79482505..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py +++ /dev/null @@ -1,348 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.automl_v1beta1.types import prediction_service -from google.longrunning import operations_pb2 # type: ignore -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO - - -class PredictionServiceGrpcTransport(PredictionServiceTransport): - """gRPC backend transport for PredictionService. - - AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. 
- - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - prediction_service.PredictResponse]: - r"""Return a callable for the predict method over gRPC. - - Perform an online prediction. The prediction result will be - directly returned in the response. Available for following ML - problems, and their expected request payloads: - - - Image Classification - Image in .JPEG, .GIF or .PNG format, - image_bytes up to 30MB. - - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image_bytes up to 30MB. - - Text Classification - TextSnippet, content up to 60,000 - characters, UTF-8 encoded. - - Text Extraction - TextSnippet, content up to 30,000 - characters, UTF-8 NFC encoded. - - Translation - TextSnippet, content up to 25,000 characters, - UTF-8 encoded. - - Tables - Row, with column values matching the columns of the - model, up to 5MB. Not available for FORECASTING - - [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]. - - - Text Sentiment - TextSnippet, content up 500 characters, - UTF-8 encoded. - - Returns: - Callable[[~.PredictRequest], - ~.PredictResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - @property - def batch_predict(self) -> Callable[ - [prediction_service.BatchPredictRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch predict method over gRPC. - - Perform a batch prediction. Unlike the online - [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], - batch prediction result won't be immediately available in the - response. Instead, a long running operation object is returned. - User can poll the operation result via - [GetOperation][google.longrunning.Operations.GetOperation] - method. Once the operation is done, - [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] - is returned in the - [response][google.longrunning.Operation.response] field. 
- Available for following ML problems: - - - Image Classification - - Image Object Detection - - Video Classification - - Video Object Tracking \* Text Extraction - - Tables - - Returns: - Callable[[~.BatchPredictRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_predict' not in self._stubs: - self._stubs['batch_predict'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.PredictionService/BatchPredict', - request_serializer=prediction_service.BatchPredictRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_predict'] - - def close(self): - self.grpc_channel.close() - - @property - def kind(self) -> str: - return "grpc" - - -__all__ = ( - 'PredictionServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py deleted file mode 100644 index ba37665a..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.automl_v1beta1.types import prediction_service -from google.longrunning import operations_pb2 # type: ignore -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import PredictionServiceGrpcTransport - - -class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): - """gRPC AsyncIO backend transport for PredictionService. - - AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. 
- client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - Awaitable[prediction_service.PredictResponse]]: - r"""Return a callable for the predict method over gRPC. - - Perform an online prediction. The prediction result will be - directly returned in the response. Available for following ML - problems, and their expected request payloads: - - - Image Classification - Image in .JPEG, .GIF or .PNG format, - image_bytes up to 30MB. - - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image_bytes up to 30MB. - - Text Classification - TextSnippet, content up to 60,000 - characters, UTF-8 encoded. - - Text Extraction - TextSnippet, content up to 30,000 - characters, UTF-8 NFC encoded. - - Translation - TextSnippet, content up to 25,000 characters, - UTF-8 encoded. - - Tables - Row, with column values matching the columns of the - model, up to 5MB. Not available for FORECASTING - - [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]. - - - Text Sentiment - TextSnippet, content up 500 characters, - UTF-8 encoded. - - Returns: - Callable[[~.PredictRequest], - Awaitable[~.PredictResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - @property - def batch_predict(self) -> Callable[ - [prediction_service.BatchPredictRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch predict method over gRPC. - - Perform a batch prediction. Unlike the online - [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], - batch prediction result won't be immediately available in the - response. Instead, a long running operation object is returned. - User can poll the operation result via - [GetOperation][google.longrunning.Operations.GetOperation] - method. Once the operation is done, - [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] - is returned in the - [response][google.longrunning.Operation.response] field. 
- Available for following ML problems: - - - Image Classification - - Image Object Detection - - Video Classification - - Video Object Tracking \* Text Extraction - - Tables - - Returns: - Callable[[~.BatchPredictRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_predict' not in self._stubs: - self._stubs['batch_predict'] = self.grpc_channel.unary_unary( - '/google.cloud.automl.v1beta1.PredictionService/BatchPredict', - request_serializer=prediction_service.BatchPredictRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_predict'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'PredictionServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py deleted file mode 100644 index 3bd06e82..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/services/prediction_service/transports/rest.py +++ /dev/null @@ -1,484 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.api_core import exceptions as core_exceptions -from google.api_core import retry as retries -from google.api_core import rest_helpers -from google.api_core import rest_streaming -from google.api_core import path_template -from google.api_core import gapic_v1 - -from google.protobuf import json_format -from google.api_core import operations_v1 -from requests import __version__ as requests_version -import dataclasses -import re -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -import warnings - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - - -from google.cloud.automl_v1beta1.types import prediction_service -from google.longrunning import operations_pb2 # type: ignore - -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, - grpc_version=None, - rest_version=requests_version, -) - - -class PredictionServiceRestInterceptor: - """Interceptor for PredictionService. 
- - Interceptors are used to manipulate requests, request metadata, and responses - in arbitrary ways. - Example use cases include: - * Logging - * Verifying requests according to service or custom semantics - * Stripping extraneous information from responses - - These use cases and more can be enabled by injecting an - instance of a custom subclass when constructing the PredictionServiceRestTransport. - - .. code-block:: python - class MyCustomPredictionServiceInterceptor(PredictionServiceRestInterceptor): - def pre_batch_predict(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_batch_predict(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_predict(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_predict(self, response): - logging.log(f"Received response: {response}") - return response - - transport = PredictionServiceRestTransport(interceptor=MyCustomPredictionServiceInterceptor()) - client = PredictionServiceClient(transport=transport) - - - """ - def pre_batch_predict(self, request: prediction_service.BatchPredictRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[prediction_service.BatchPredictRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for batch_predict - - Override in a subclass to manipulate the request or metadata - before they are sent to the PredictionService server. - """ - return request, metadata - - def post_batch_predict(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for batch_predict - - Override in a subclass to manipulate the response - after it is returned by the PredictionService server but before - it is returned to user code. - """ - return response - def pre_predict(self, request: prediction_service.PredictRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[prediction_service.PredictRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for predict - - Override in a subclass to manipulate the request or metadata - before they are sent to the PredictionService server. - """ - return request, metadata - - def post_predict(self, response: prediction_service.PredictResponse) -> prediction_service.PredictResponse: - """Post-rpc interceptor for predict - - Override in a subclass to manipulate the response - after it is returned by the PredictionService server but before - it is returned to user code. - """ - return response - - -@dataclasses.dataclass -class PredictionServiceRestStub: - _session: AuthorizedSession - _host: str - _interceptor: PredictionServiceRestInterceptor - - -class PredictionServiceRestTransport(PredictionServiceTransport): - """REST backend transport for PredictionService. - - AutoML Prediction API. - - On any input that is documented to expect a string parameter in - snake_case or kebab-case, either of those cases is accepted. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
- - It sends JSON representations of protocol buffers over HTTP/1.1 - - """ - - def __init__(self, *, - host: str = 'automl.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[ - ], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = 'https', - interceptor: Optional[PredictionServiceRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. - # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) - if maybe_url_match is None: - raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST) - self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or PredictionServiceRestInterceptor() - self._prep_wrapped_messages(client_info) - - @property - def operations_client(self) -> operations_v1.AbstractOperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Only create a new client if we do not already have one. 
- if self._operations_client is None: - http_options: Dict[str, List[Dict[str, str]]] = { - 'google.longrunning.Operations.CancelOperation': [ - { - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}:cancel', - 'body': '*', - }, - ], - 'google.longrunning.Operations.DeleteOperation': [ - { - 'method': 'delete', - 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}', - }, - ], - 'google.longrunning.Operations.GetOperation': [ - { - 'method': 'get', - 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}', - }, - ], - 'google.longrunning.Operations.ListOperations': [ - { - 'method': 'get', - 'uri': '/v1beta1/{name=projects/*/locations/*}/operations', - }, - ], - 'google.longrunning.Operations.WaitOperation': [ - { - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/operations/*}:wait', - 'body': '*', - }, - ], - } - - rest_transport = operations_v1.OperationsRestTransport( - host=self._host, - # use the credentials which are saved - credentials=self._credentials, - scopes=self._scopes, - http_options=http_options, - path_prefix="v1beta1") - - self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) - - # Return the client from cache. - return self._operations_client - - class _BatchPredict(PredictionServiceRestStub): - def __hash__(self): - return hash("BatchPredict") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: prediction_service.BatchPredictRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the batch predict method over HTTP. - - Args: - request (~.prediction_service.BatchPredictRequest): - The request object. Request message for - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:batchPredict', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_batch_predict(request, metadata) - pb_request = prediction_service.BatchPredictRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_batch_predict(resp) - return resp - - class _Predict(PredictionServiceRestStub): - def __hash__(self): - return hash("Predict") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: prediction_service.PredictRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> prediction_service.PredictResponse: - r"""Call the predict method over HTTP. - - Args: - request (~.prediction_service.PredictRequest): - The request object. Request message for - [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.prediction_service.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v1beta1/{name=projects/*/locations/*/models/*}:predict', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_predict(request, metadata) - pb_request = prediction_service.PredictRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = prediction_service.PredictResponse() - pb_resp = prediction_service.PredictResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_predict(resp) - return resp - - @property - def batch_predict(self) -> Callable[ - [prediction_service.BatchPredictRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._BatchPredict(self._session, self._host, self._interceptor) # type: ignore - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - prediction_service.PredictResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._Predict(self._session, self._host, self._interceptor) # type: ignore - - @property - def kind(self) -> str: - return "rest" - - def close(self): - self._session.close() - - -__all__=( - 'PredictionServiceRestTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/__init__.py deleted file mode 100644 index c5985ccf..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/__init__.py +++ /dev/null @@ -1,318 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .annotation_payload import ( - AnnotationPayload, -) -from .annotation_spec import ( - AnnotationSpec, -) -from .classification import ( - ClassificationAnnotation, - ClassificationEvaluationMetrics, - VideoClassificationAnnotation, - ClassificationType, -) -from .column_spec import ( - ColumnSpec, -) -from .data_items import ( - Document, - DocumentDimensions, - ExamplePayload, - Image, - Row, - TextSnippet, -) -from .data_stats import ( - ArrayStats, - CategoryStats, - CorrelationStats, - DataStats, - Float64Stats, - StringStats, - StructStats, - TimestampStats, -) -from .data_types import ( - DataType, - StructType, - TypeCode, -) -from .dataset import ( - Dataset, -) -from .detection import ( - BoundingBoxMetricsEntry, - ImageObjectDetectionAnnotation, - ImageObjectDetectionEvaluationMetrics, - VideoObjectTrackingAnnotation, - VideoObjectTrackingEvaluationMetrics, -) -from .geometry import ( - BoundingPoly, - NormalizedVertex, -) -from .image import ( - ImageClassificationDatasetMetadata, - ImageClassificationModelDeploymentMetadata, - ImageClassificationModelMetadata, - ImageObjectDetectionDatasetMetadata, - ImageObjectDetectionModelDeploymentMetadata, - ImageObjectDetectionModelMetadata, -) -from .io import ( - BatchPredictInputConfig, - BatchPredictOutputConfig, - BigQueryDestination, - BigQuerySource, - DocumentInputConfig, - ExportEvaluatedExamplesOutputConfig, - GcrDestination, - GcsDestination, - GcsSource, - InputConfig, - ModelExportOutputConfig, - OutputConfig, -) -from .model import ( - Model, -) -from .model_evaluation import ( - ModelEvaluation, -) -from .operations import ( - BatchPredictOperationMetadata, - CreateModelOperationMetadata, - DeleteOperationMetadata, - DeployModelOperationMetadata, - ExportDataOperationMetadata, - ExportEvaluatedExamplesOperationMetadata, - ExportModelOperationMetadata, - ImportDataOperationMetadata, - OperationMetadata, - UndeployModelOperationMetadata, -) -from .prediction_service import ( - BatchPredictRequest, - BatchPredictResult, - PredictRequest, - PredictResponse, -) -from .ranges import ( - DoubleRange, -) -from .regression import ( - RegressionEvaluationMetrics, -) -from .service import ( - CreateDatasetRequest, - CreateModelRequest, - DeleteDatasetRequest, - DeleteModelRequest, - DeployModelRequest, - ExportDataRequest, - ExportEvaluatedExamplesRequest, - ExportModelRequest, - GetAnnotationSpecRequest, - GetColumnSpecRequest, - GetDatasetRequest, - GetModelEvaluationRequest, - GetModelRequest, - GetTableSpecRequest, - ImportDataRequest, - ListColumnSpecsRequest, - ListColumnSpecsResponse, - ListDatasetsRequest, - ListDatasetsResponse, - ListModelEvaluationsRequest, - ListModelEvaluationsResponse, - ListModelsRequest, - ListModelsResponse, - ListTableSpecsRequest, - ListTableSpecsResponse, - UndeployModelRequest, - UpdateColumnSpecRequest, - UpdateDatasetRequest, - UpdateTableSpecRequest, -) -from .table_spec import ( - TableSpec, -) -from .tables import ( - TablesAnnotation, - TablesDatasetMetadata, - TablesModelColumnInfo, - TablesModelMetadata, -) -from .temporal import ( - TimeSegment, -) -from .text import ( - TextClassificationDatasetMetadata, - TextClassificationModelMetadata, - TextExtractionDatasetMetadata, - TextExtractionModelMetadata, - TextSentimentDatasetMetadata, - TextSentimentModelMetadata, -) -from .text_extraction import ( - TextExtractionAnnotation, - TextExtractionEvaluationMetrics, -) -from .text_segment import ( - TextSegment, -) -from .text_sentiment import ( - TextSentimentAnnotation, - 
TextSentimentEvaluationMetrics, -) -from .translation import ( - TranslationAnnotation, - TranslationDatasetMetadata, - TranslationEvaluationMetrics, - TranslationModelMetadata, -) -from .video import ( - VideoClassificationDatasetMetadata, - VideoClassificationModelMetadata, - VideoObjectTrackingDatasetMetadata, - VideoObjectTrackingModelMetadata, -) - -__all__ = ( - 'AnnotationPayload', - 'AnnotationSpec', - 'ClassificationAnnotation', - 'ClassificationEvaluationMetrics', - 'VideoClassificationAnnotation', - 'ClassificationType', - 'ColumnSpec', - 'Document', - 'DocumentDimensions', - 'ExamplePayload', - 'Image', - 'Row', - 'TextSnippet', - 'ArrayStats', - 'CategoryStats', - 'CorrelationStats', - 'DataStats', - 'Float64Stats', - 'StringStats', - 'StructStats', - 'TimestampStats', - 'DataType', - 'StructType', - 'TypeCode', - 'Dataset', - 'BoundingBoxMetricsEntry', - 'ImageObjectDetectionAnnotation', - 'ImageObjectDetectionEvaluationMetrics', - 'VideoObjectTrackingAnnotation', - 'VideoObjectTrackingEvaluationMetrics', - 'BoundingPoly', - 'NormalizedVertex', - 'ImageClassificationDatasetMetadata', - 'ImageClassificationModelDeploymentMetadata', - 'ImageClassificationModelMetadata', - 'ImageObjectDetectionDatasetMetadata', - 'ImageObjectDetectionModelDeploymentMetadata', - 'ImageObjectDetectionModelMetadata', - 'BatchPredictInputConfig', - 'BatchPredictOutputConfig', - 'BigQueryDestination', - 'BigQuerySource', - 'DocumentInputConfig', - 'ExportEvaluatedExamplesOutputConfig', - 'GcrDestination', - 'GcsDestination', - 'GcsSource', - 'InputConfig', - 'ModelExportOutputConfig', - 'OutputConfig', - 'Model', - 'ModelEvaluation', - 'BatchPredictOperationMetadata', - 'CreateModelOperationMetadata', - 'DeleteOperationMetadata', - 'DeployModelOperationMetadata', - 'ExportDataOperationMetadata', - 'ExportEvaluatedExamplesOperationMetadata', - 'ExportModelOperationMetadata', - 'ImportDataOperationMetadata', - 'OperationMetadata', - 'UndeployModelOperationMetadata', - 'BatchPredictRequest', - 'BatchPredictResult', - 'PredictRequest', - 'PredictResponse', - 'DoubleRange', - 'RegressionEvaluationMetrics', - 'CreateDatasetRequest', - 'CreateModelRequest', - 'DeleteDatasetRequest', - 'DeleteModelRequest', - 'DeployModelRequest', - 'ExportDataRequest', - 'ExportEvaluatedExamplesRequest', - 'ExportModelRequest', - 'GetAnnotationSpecRequest', - 'GetColumnSpecRequest', - 'GetDatasetRequest', - 'GetModelEvaluationRequest', - 'GetModelRequest', - 'GetTableSpecRequest', - 'ImportDataRequest', - 'ListColumnSpecsRequest', - 'ListColumnSpecsResponse', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'ListTableSpecsRequest', - 'ListTableSpecsResponse', - 'UndeployModelRequest', - 'UpdateColumnSpecRequest', - 'UpdateDatasetRequest', - 'UpdateTableSpecRequest', - 'TableSpec', - 'TablesAnnotation', - 'TablesDatasetMetadata', - 'TablesModelColumnInfo', - 'TablesModelMetadata', - 'TimeSegment', - 'TextClassificationDatasetMetadata', - 'TextClassificationModelMetadata', - 'TextExtractionDatasetMetadata', - 'TextExtractionModelMetadata', - 'TextSentimentDatasetMetadata', - 'TextSentimentModelMetadata', - 'TextExtractionAnnotation', - 'TextExtractionEvaluationMetrics', - 'TextSegment', - 'TextSentimentAnnotation', - 'TextSentimentEvaluationMetrics', - 'TranslationAnnotation', - 'TranslationDatasetMetadata', - 'TranslationEvaluationMetrics', - 'TranslationModelMetadata', - 'VideoClassificationDatasetMetadata', 
- 'VideoClassificationModelMetadata', - 'VideoObjectTrackingDatasetMetadata', - 'VideoObjectTrackingModelMetadata', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_payload.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_payload.py deleted file mode 100644 index 6e93ea5a..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_payload.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import classification as gca_classification -from google.cloud.automl_v1beta1.types import detection -from google.cloud.automl_v1beta1.types import tables as gca_tables -from google.cloud.automl_v1beta1.types import text_extraction as gca_text_extraction -from google.cloud.automl_v1beta1.types import text_sentiment as gca_text_sentiment -from google.cloud.automl_v1beta1.types import translation as gca_translation - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'AnnotationPayload', - }, -) - - -class AnnotationPayload(proto.Message): - r"""Contains annotation information that is relevant to AutoML. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - translation (google.cloud.automl_v1beta1.types.TranslationAnnotation): - Annotation details for translation. - - This field is a member of `oneof`_ ``detail``. - classification (google.cloud.automl_v1beta1.types.ClassificationAnnotation): - Annotation details for content or image - classification. - - This field is a member of `oneof`_ ``detail``. - image_object_detection (google.cloud.automl_v1beta1.types.ImageObjectDetectionAnnotation): - Annotation details for image object - detection. - - This field is a member of `oneof`_ ``detail``. - video_classification (google.cloud.automl_v1beta1.types.VideoClassificationAnnotation): - Annotation details for video classification. - Returned for Video Classification predictions. - - This field is a member of `oneof`_ ``detail``. - video_object_tracking (google.cloud.automl_v1beta1.types.VideoObjectTrackingAnnotation): - Annotation details for video object tracking. - - This field is a member of `oneof`_ ``detail``. - text_extraction (google.cloud.automl_v1beta1.types.TextExtractionAnnotation): - Annotation details for text extraction. - - This field is a member of `oneof`_ ``detail``. - text_sentiment (google.cloud.automl_v1beta1.types.TextSentimentAnnotation): - Annotation details for text sentiment. - - This field is a member of `oneof`_ ``detail``. 
- tables (google.cloud.automl_v1beta1.types.TablesAnnotation): - Annotation details for Tables. - - This field is a member of `oneof`_ ``detail``. - annotation_spec_id (str): - Output only . The resource ID of the - annotation spec that this annotation pertains - to. The annotation spec comes from either an - ancestor dataset, or the dataset that was used - to train the model in use. - display_name (str): - Output only. The value of - [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name] - when the model was trained. Because this field returns a - value at model training time, for different models trained - using the same dataset, the returned value could be - different as model owner could update the ``display_name`` - between any two model training. - """ - - translation: gca_translation.TranslationAnnotation = proto.Field( - proto.MESSAGE, - number=2, - oneof='detail', - message=gca_translation.TranslationAnnotation, - ) - classification: gca_classification.ClassificationAnnotation = proto.Field( - proto.MESSAGE, - number=3, - oneof='detail', - message=gca_classification.ClassificationAnnotation, - ) - image_object_detection: detection.ImageObjectDetectionAnnotation = proto.Field( - proto.MESSAGE, - number=4, - oneof='detail', - message=detection.ImageObjectDetectionAnnotation, - ) - video_classification: gca_classification.VideoClassificationAnnotation = proto.Field( - proto.MESSAGE, - number=9, - oneof='detail', - message=gca_classification.VideoClassificationAnnotation, - ) - video_object_tracking: detection.VideoObjectTrackingAnnotation = proto.Field( - proto.MESSAGE, - number=8, - oneof='detail', - message=detection.VideoObjectTrackingAnnotation, - ) - text_extraction: gca_text_extraction.TextExtractionAnnotation = proto.Field( - proto.MESSAGE, - number=6, - oneof='detail', - message=gca_text_extraction.TextExtractionAnnotation, - ) - text_sentiment: gca_text_sentiment.TextSentimentAnnotation = proto.Field( - proto.MESSAGE, - number=7, - oneof='detail', - message=gca_text_sentiment.TextSentimentAnnotation, - ) - tables: gca_tables.TablesAnnotation = proto.Field( - proto.MESSAGE, - number=10, - oneof='detail', - message=gca_tables.TablesAnnotation, - ) - annotation_spec_id: str = proto.Field( - proto.STRING, - number=1, - ) - display_name: str = proto.Field( - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_spec.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_spec.py deleted file mode 100644 index 4cd2e1dc..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/annotation_spec.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'AnnotationSpec', - }, -) - - -class AnnotationSpec(proto.Message): - r"""A definition of an annotation spec. - - Attributes: - name (str): - Output only. Resource name of the annotation spec. Form: - - 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}' - display_name (str): - Required. The name of the annotation spec to show in the - interface. The name can be up to 32 characters long and must - match the regexp ``[a-zA-Z0-9_]+``. - example_count (int): - Output only. The number of examples in the - parent dataset labeled by the annotation spec. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - display_name: str = proto.Field( - proto.STRING, - number=2, - ) - example_count: int = proto.Field( - proto.INT32, - number=9, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/classification.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/classification.py deleted file mode 100644 index 82b43c9f..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/classification.py +++ /dev/null @@ -1,379 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import temporal - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'ClassificationType', - 'ClassificationAnnotation', - 'VideoClassificationAnnotation', - 'ClassificationEvaluationMetrics', - }, -) - - -class ClassificationType(proto.Enum): - r"""Type of the classification problem. - - Values: - CLASSIFICATION_TYPE_UNSPECIFIED (0): - An un-set value of this enum. - MULTICLASS (1): - At most one label is allowed per example. - MULTILABEL (2): - Multiple labels are allowed for one example. - """ - CLASSIFICATION_TYPE_UNSPECIFIED = 0 - MULTICLASS = 1 - MULTILABEL = 2 - - -class ClassificationAnnotation(proto.Message): - r"""Contains annotation details specific to classification. - - Attributes: - score (float): - Output only. A confidence estimate between - 0.0 and 1.0. A higher value means greater - confidence that the annotation is positive. If a - user approves an annotation as negative or - positive, the score value remains unchanged. If - a user creates an annotation, the score is 0 for - negative or 1 for positive. - """ - - score: float = proto.Field( - proto.FLOAT, - number=1, - ) - - -class VideoClassificationAnnotation(proto.Message): - r"""Contains annotation details specific to video classification. - - Attributes: - type_ (str): - Output only. Expresses the type of video classification. 
- Possible values: - - - ``segment`` - Classification done on a specified by user - time segment of a video. AnnotationSpec is answered to be - present in that time segment, if it is present in any - part of it. The video ML model evaluations are done only - for this type of classification. - - - ``shot``- Shot-level classification. AutoML Video - Intelligence determines the boundaries for each camera - shot in the entire segment of the video that user - specified in the request configuration. AutoML Video - Intelligence then returns labels and their confidence - scores for each detected shot, along with the start and - end time of the shot. WARNING: Model evaluation is not - done for this classification type, the quality of it - depends on training data, but there are no metrics - provided to describe that quality. - - - ``1s_interval`` - AutoML Video Intelligence returns - labels and their confidence scores for each second of the - entire segment of the video that user specified in the - request configuration. WARNING: Model evaluation is not - done for this classification type, the quality of it - depends on training data, but there are no metrics - provided to describe that quality. - classification_annotation (google.cloud.automl_v1beta1.types.ClassificationAnnotation): - Output only . The classification details of - this annotation. - time_segment (google.cloud.automl_v1beta1.types.TimeSegment): - Output only . The time segment of the video - to which the annotation applies. - """ - - type_: str = proto.Field( - proto.STRING, - number=1, - ) - classification_annotation: 'ClassificationAnnotation' = proto.Field( - proto.MESSAGE, - number=2, - message='ClassificationAnnotation', - ) - time_segment: temporal.TimeSegment = proto.Field( - proto.MESSAGE, - number=3, - message=temporal.TimeSegment, - ) - - -class ClassificationEvaluationMetrics(proto.Message): - r"""Model evaluation metrics for classification problems. Note: For - Video Classification this metrics only describe quality of the Video - Classification predictions of "segment_classification" type. - - Attributes: - au_prc (float): - Output only. The Area Under Precision-Recall - Curve metric. Micro-averaged for the overall - evaluation. - base_au_prc (float): - Output only. The Area Under Precision-Recall - Curve metric based on priors. Micro-averaged for - the overall evaluation. Deprecated. - au_roc (float): - Output only. The Area Under Receiver - Operating Characteristic curve metric. - Micro-averaged for the overall evaluation. - log_loss (float): - Output only. The Log Loss metric. - confidence_metrics_entry (MutableSequence[google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]): - Output only. Metrics for each confidence_threshold in - 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and - position_threshold = INT32_MAX_VALUE. ROC and - precision-recall curves, and other aggregated metrics are - derived from them. The confidence metrics entries may also - be supplied for additional values of position_threshold, but - from these no aggregated metrics are computed. - confusion_matrix (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix): - Output only. Confusion matrix of the - evaluation. Only set for MULTICLASS - classification problems where number of labels - is no more than 10. - Only set for model level evaluation, not for - evaluation per label. - annotation_spec_id (MutableSequence[str]): - Output only. The annotation spec ids used for - this evaluation. 
- """ - - class ConfidenceMetricsEntry(proto.Message): - r"""Metrics for a single confidence threshold. - - Attributes: - confidence_threshold (float): - Output only. Metrics are computed with an - assumption that the model never returns - predictions with score lower than this value. - position_threshold (int): - Output only. Metrics are computed with an assumption that - the model always returns at most this many predictions - (ordered by their score, descendingly), but they all still - need to meet the confidence_threshold. - recall (float): - Output only. Recall (True Positive Rate) for - the given confidence threshold. - precision (float): - Output only. Precision for the given - confidence threshold. - false_positive_rate (float): - Output only. False Positive Rate for the - given confidence threshold. - f1_score (float): - Output only. The harmonic mean of recall and - precision. - recall_at1 (float): - Output only. The Recall (True Positive Rate) - when only considering the label that has the - highest prediction score and not below the - confidence threshold for each example. - precision_at1 (float): - Output only. The precision when only - considering the label that has the highest - prediction score and not below the confidence - threshold for each example. - false_positive_rate_at1 (float): - Output only. The False Positive Rate when - only considering the label that has the highest - prediction score and not below the confidence - threshold for each example. - f1_score_at1 (float): - Output only. The harmonic mean of - [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] - and - [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1]. - true_positive_count (int): - Output only. The number of model created - labels that match a ground truth label. - false_positive_count (int): - Output only. The number of model created - labels that do not match a ground truth label. - false_negative_count (int): - Output only. The number of ground truth - labels that are not matched by a model created - label. - true_negative_count (int): - Output only. The number of labels that were - not created by the model, but if they would, - they would not match a ground truth label. - """ - - confidence_threshold: float = proto.Field( - proto.FLOAT, - number=1, - ) - position_threshold: int = proto.Field( - proto.INT32, - number=14, - ) - recall: float = proto.Field( - proto.FLOAT, - number=2, - ) - precision: float = proto.Field( - proto.FLOAT, - number=3, - ) - false_positive_rate: float = proto.Field( - proto.FLOAT, - number=8, - ) - f1_score: float = proto.Field( - proto.FLOAT, - number=4, - ) - recall_at1: float = proto.Field( - proto.FLOAT, - number=5, - ) - precision_at1: float = proto.Field( - proto.FLOAT, - number=6, - ) - false_positive_rate_at1: float = proto.Field( - proto.FLOAT, - number=9, - ) - f1_score_at1: float = proto.Field( - proto.FLOAT, - number=7, - ) - true_positive_count: int = proto.Field( - proto.INT64, - number=10, - ) - false_positive_count: int = proto.Field( - proto.INT64, - number=11, - ) - false_negative_count: int = proto.Field( - proto.INT64, - number=12, - ) - true_negative_count: int = proto.Field( - proto.INT64, - number=13, - ) - - class ConfusionMatrix(proto.Message): - r"""Confusion matrix of the model running the classification. - - Attributes: - annotation_spec_id (MutableSequence[str]): - Output only. 
IDs of the annotation specs used in the - confusion matrix. For Tables CLASSIFICATION - - [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type] - only list of [annotation_spec_display_name-s][] is - populated. - display_name (MutableSequence[str]): - Output only. Display name of the annotation specs used in - the confusion matrix, as they were at the moment of the - evaluation. For Tables CLASSIFICATION - - [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type], - distinct values of the target column at the moment of the - model evaluation are populated here. - row (MutableSequence[google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix.Row]): - Output only. Rows in the confusion matrix. The number of - rows is equal to the size of ``annotation_spec_id``. - ``row[i].example_count[j]`` is the number of examples that - have ground truth of the ``annotation_spec_id[i]`` and are - predicted as ``annotation_spec_id[j]`` by the model being - evaluated. - """ - - class Row(proto.Message): - r"""Output only. A row in the confusion matrix. - - Attributes: - example_count (MutableSequence[int]): - Output only. Value of the specific cell in the confusion - matrix. The number of values each row has (i.e. the length - of the row) is equal to the length of the - ``annotation_spec_id`` field or, if that one is not - populated, length of the - [display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] - field. - """ - - example_count: MutableSequence[int] = proto.RepeatedField( - proto.INT32, - number=1, - ) - - annotation_spec_id: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=1, - ) - display_name: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=3, - ) - row: MutableSequence['ClassificationEvaluationMetrics.ConfusionMatrix.Row'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='ClassificationEvaluationMetrics.ConfusionMatrix.Row', - ) - - au_prc: float = proto.Field( - proto.FLOAT, - number=1, - ) - base_au_prc: float = proto.Field( - proto.FLOAT, - number=2, - ) - au_roc: float = proto.Field( - proto.FLOAT, - number=6, - ) - log_loss: float = proto.Field( - proto.FLOAT, - number=7, - ) - confidence_metrics_entry: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=ConfidenceMetricsEntry, - ) - confusion_matrix: ConfusionMatrix = proto.Field( - proto.MESSAGE, - number=4, - message=ConfusionMatrix, - ) - annotation_spec_id: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/column_spec.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/column_spec.py deleted file mode 100644 index fc917f3f..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/column_spec.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import data_stats as gca_data_stats -from google.cloud.automl_v1beta1.types import data_types - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'ColumnSpec', - }, -) - - -class ColumnSpec(proto.Message): - r"""A representation of a column in a relational table. When listing - them, column specs are returned in the same order in which they were - given on import . Used by: - - - Tables - - Attributes: - name (str): - Output only. The resource name of the column specs. Form: - - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}/columnSpecs/{column_spec_id}`` - data_type (google.cloud.automl_v1beta1.types.DataType): - The data type of elements stored in the - column. - display_name (str): - Output only. The name of the column to show in the - interface. The name can be up to 100 characters long and can - consist only of ASCII Latin letters A-Z and a-z, ASCII - digits 0-9, underscores(_), and forward slashes(/), and must - start with a letter or a digit. - data_stats (google.cloud.automl_v1beta1.types.DataStats): - Output only. Stats of the series of values in the column. - This field may be stale, see the ancestor's - Dataset.tables_dataset_metadata.stats_update_time field for - the timestamp at which these stats were last updated. - top_correlated_columns (MutableSequence[google.cloud.automl_v1beta1.types.ColumnSpec.CorrelatedColumn]): - Deprecated. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - """ - - class CorrelatedColumn(proto.Message): - r"""Identifies the table's column, and its correlation with the - column this ColumnSpec describes. - - Attributes: - column_spec_id (str): - The column_spec_id of the correlated column, which belongs - to the same table as the in-context column. - correlation_stats (google.cloud.automl_v1beta1.types.CorrelationStats): - Correlation between this and the in-context - column. 
- """ - - column_spec_id: str = proto.Field( - proto.STRING, - number=1, - ) - correlation_stats: gca_data_stats.CorrelationStats = proto.Field( - proto.MESSAGE, - number=2, - message=gca_data_stats.CorrelationStats, - ) - - name: str = proto.Field( - proto.STRING, - number=1, - ) - data_type: data_types.DataType = proto.Field( - proto.MESSAGE, - number=2, - message=data_types.DataType, - ) - display_name: str = proto.Field( - proto.STRING, - number=3, - ) - data_stats: gca_data_stats.DataStats = proto.Field( - proto.MESSAGE, - number=4, - message=gca_data_stats.DataStats, - ) - top_correlated_columns: MutableSequence[CorrelatedColumn] = proto.RepeatedField( - proto.MESSAGE, - number=5, - message=CorrelatedColumn, - ) - etag: str = proto.Field( - proto.STRING, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_items.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_items.py deleted file mode 100644 index 961dfe80..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_items.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import geometry -from google.cloud.automl_v1beta1.types import io -from google.cloud.automl_v1beta1.types import text_segment as gca_text_segment -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'Image', - 'TextSnippet', - 'DocumentDimensions', - 'Document', - 'Row', - 'ExamplePayload', - }, -) - - -class Image(proto.Message): - r"""A representation of an image. - Only images up to 30MB in size are supported. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - image_bytes (bytes): - Image content represented as a stream of bytes. Note: As - with all ``bytes`` fields, protobuffers use a pure binary - representation, whereas JSON representations use base64. - - This field is a member of `oneof`_ ``data``. - input_config (google.cloud.automl_v1beta1.types.InputConfig): - An input config specifying the content of the - image. - - This field is a member of `oneof`_ ``data``. - thumbnail_uri (str): - Output only. HTTP URI to the thumbnail image. 
- """ - - image_bytes: bytes = proto.Field( - proto.BYTES, - number=1, - oneof='data', - ) - input_config: io.InputConfig = proto.Field( - proto.MESSAGE, - number=6, - oneof='data', - message=io.InputConfig, - ) - thumbnail_uri: str = proto.Field( - proto.STRING, - number=4, - ) - - -class TextSnippet(proto.Message): - r"""A representation of a text snippet. - - Attributes: - content (str): - Required. The content of the text snippet as - a string. Up to 250000 characters long. - mime_type (str): - Optional. The format of - [content][google.cloud.automl.v1beta1.TextSnippet.content]. - Currently the only two allowed values are "text/html" and - "text/plain". If left blank, the format is automatically - determined from the type of the uploaded - [content][google.cloud.automl.v1beta1.TextSnippet.content]. - content_uri (str): - Output only. HTTP URI where you can download - the content. - """ - - content: str = proto.Field( - proto.STRING, - number=1, - ) - mime_type: str = proto.Field( - proto.STRING, - number=2, - ) - content_uri: str = proto.Field( - proto.STRING, - number=4, - ) - - -class DocumentDimensions(proto.Message): - r"""Message that describes dimension of a document. - - Attributes: - unit (google.cloud.automl_v1beta1.types.DocumentDimensions.DocumentDimensionUnit): - Unit of the dimension. - width (float): - Width value of the document, works together - with the unit. - height (float): - Height value of the document, works together - with the unit. - """ - class DocumentDimensionUnit(proto.Enum): - r"""Unit of the document dimension. - - Values: - DOCUMENT_DIMENSION_UNIT_UNSPECIFIED (0): - Should not be used. - INCH (1): - Document dimension is measured in inches. - CENTIMETER (2): - Document dimension is measured in - centimeters. - POINT (3): - Document dimension is measured in points. 72 - points = 1 inch. - """ - DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = 0 - INCH = 1 - CENTIMETER = 2 - POINT = 3 - - unit: DocumentDimensionUnit = proto.Field( - proto.ENUM, - number=1, - enum=DocumentDimensionUnit, - ) - width: float = proto.Field( - proto.FLOAT, - number=2, - ) - height: float = proto.Field( - proto.FLOAT, - number=3, - ) - - -class Document(proto.Message): - r"""A structured text document e.g. a PDF. - - Attributes: - input_config (google.cloud.automl_v1beta1.types.DocumentInputConfig): - An input config specifying the content of the - document. - document_text (google.cloud.automl_v1beta1.types.TextSnippet): - The plain text version of this document. - layout (MutableSequence[google.cloud.automl_v1beta1.types.Document.Layout]): - Describes the layout of the document. Sorted by - [page_number][]. - document_dimensions (google.cloud.automl_v1beta1.types.DocumentDimensions): - The dimensions of the page in the document. - page_count (int): - Number of pages in the document. - """ - - class Layout(proto.Message): - r"""Describes the layout information of a - [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - in the document. - - Attributes: - text_segment (google.cloud.automl_v1beta1.types.TextSegment): - Text Segment that represents a segment in - [document_text][google.cloud.automl.v1beta1.Document.document_text]. - page_number (int): - Page number of the - [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - in the original document, starts from 1. - bounding_poly (google.cloud.automl_v1beta1.types.BoundingPoly): - The position of the - [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - in the page. 
Contains exactly 4 - - [normalized_vertices][google.cloud.automl.v1beta1.BoundingPoly.normalized_vertices] - and they are connected by edges in the order provided, which - will represent a rectangle parallel to the frame. The - [NormalizedVertex-s][google.cloud.automl.v1beta1.NormalizedVertex] - are relative to the page. Coordinates are based on top-left - as point (0,0). - text_segment_type (google.cloud.automl_v1beta1.types.Document.Layout.TextSegmentType): - The type of the - [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - in document. - """ - class TextSegmentType(proto.Enum): - r"""The type of TextSegment in the context of the original - document. - - Values: - TEXT_SEGMENT_TYPE_UNSPECIFIED (0): - Should not be used. - TOKEN (1): - The text segment is a token. e.g. word. - PARAGRAPH (2): - The text segment is a paragraph. - FORM_FIELD (3): - The text segment is a form field. - FORM_FIELD_NAME (4): - The text segment is the name part of a form field. It will - be treated as child of another FORM_FIELD TextSegment if its - span is subspan of another TextSegment with type FORM_FIELD. - FORM_FIELD_CONTENTS (5): - The text segment is the text content part of a form field. - It will be treated as child of another FORM_FIELD - TextSegment if its span is subspan of another TextSegment - with type FORM_FIELD. - TABLE (6): - The text segment is a whole table, including - headers, and all rows. - TABLE_HEADER (7): - The text segment is a table's headers. It - will be treated as child of another TABLE - TextSegment if its span is subspan of another - TextSegment with type TABLE. - TABLE_ROW (8): - The text segment is a row in table. It will - be treated as child of another TABLE TextSegment - if its span is subspan of another TextSegment - with type TABLE. - TABLE_CELL (9): - The text segment is a cell in table. It will be treated as - child of another TABLE_ROW TextSegment if its span is - subspan of another TextSegment with type TABLE_ROW. - """ - TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 - TOKEN = 1 - PARAGRAPH = 2 - FORM_FIELD = 3 - FORM_FIELD_NAME = 4 - FORM_FIELD_CONTENTS = 5 - TABLE = 6 - TABLE_HEADER = 7 - TABLE_ROW = 8 - TABLE_CELL = 9 - - text_segment: gca_text_segment.TextSegment = proto.Field( - proto.MESSAGE, - number=1, - message=gca_text_segment.TextSegment, - ) - page_number: int = proto.Field( - proto.INT32, - number=2, - ) - bounding_poly: geometry.BoundingPoly = proto.Field( - proto.MESSAGE, - number=3, - message=geometry.BoundingPoly, - ) - text_segment_type: 'Document.Layout.TextSegmentType' = proto.Field( - proto.ENUM, - number=4, - enum='Document.Layout.TextSegmentType', - ) - - input_config: io.DocumentInputConfig = proto.Field( - proto.MESSAGE, - number=1, - message=io.DocumentInputConfig, - ) - document_text: 'TextSnippet' = proto.Field( - proto.MESSAGE, - number=2, - message='TextSnippet', - ) - layout: MutableSequence[Layout] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Layout, - ) - document_dimensions: 'DocumentDimensions' = proto.Field( - proto.MESSAGE, - number=4, - message='DocumentDimensions', - ) - page_count: int = proto.Field( - proto.INT32, - number=5, - ) - - -class Row(proto.Message): - r"""A representation of a row in a relational table. - - Attributes: - column_spec_ids (MutableSequence[str]): - The resource IDs of the column specs describing the columns - of the row. 
If set must contain, but possibly in a different - order, all input feature - - [column_spec_ids][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] - of the Model this row is being passed to. Note: The below - ``values`` field must match order of this field, if this - field is set. - values (MutableSequence[google.protobuf.struct_pb2.Value]): - Required. The values of the row cells, given in the same - order as the column_spec_ids, or, if not set, then in the - same order as input feature - - [column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] - of the Model this row is being passed to. - """ - - column_spec_ids: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=2, - ) - values: MutableSequence[struct_pb2.Value] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - - -class ExamplePayload(proto.Message): - r"""Example data used for training or prediction. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - image (google.cloud.automl_v1beta1.types.Image): - Example image. - - This field is a member of `oneof`_ ``payload``. - text_snippet (google.cloud.automl_v1beta1.types.TextSnippet): - Example text. - - This field is a member of `oneof`_ ``payload``. - document (google.cloud.automl_v1beta1.types.Document): - Example document. - - This field is a member of `oneof`_ ``payload``. - row (google.cloud.automl_v1beta1.types.Row): - Example relational table row. - - This field is a member of `oneof`_ ``payload``. - """ - - image: 'Image' = proto.Field( - proto.MESSAGE, - number=1, - oneof='payload', - message='Image', - ) - text_snippet: 'TextSnippet' = proto.Field( - proto.MESSAGE, - number=2, - oneof='payload', - message='TextSnippet', - ) - document: 'Document' = proto.Field( - proto.MESSAGE, - number=4, - oneof='payload', - message='Document', - ) - row: 'Row' = proto.Field( - proto.MESSAGE, - number=3, - oneof='payload', - message='Row', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_stats.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_stats.py deleted file mode 100644 index 1b5019c3..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_stats.py +++ /dev/null @@ -1,361 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'DataStats', - 'Float64Stats', - 'StringStats', - 'TimestampStats', - 'ArrayStats', - 'StructStats', - 'CategoryStats', - 'CorrelationStats', - }, -) - - -class DataStats(proto.Message): - r"""The data statistics of a series of values that share the same - DataType. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - float64_stats (google.cloud.automl_v1beta1.types.Float64Stats): - The statistics for FLOAT64 DataType. - - This field is a member of `oneof`_ ``stats``. - string_stats (google.cloud.automl_v1beta1.types.StringStats): - The statistics for STRING DataType. - - This field is a member of `oneof`_ ``stats``. - timestamp_stats (google.cloud.automl_v1beta1.types.TimestampStats): - The statistics for TIMESTAMP DataType. - - This field is a member of `oneof`_ ``stats``. - array_stats (google.cloud.automl_v1beta1.types.ArrayStats): - The statistics for ARRAY DataType. - - This field is a member of `oneof`_ ``stats``. - struct_stats (google.cloud.automl_v1beta1.types.StructStats): - The statistics for STRUCT DataType. - - This field is a member of `oneof`_ ``stats``. - category_stats (google.cloud.automl_v1beta1.types.CategoryStats): - The statistics for CATEGORY DataType. - - This field is a member of `oneof`_ ``stats``. - distinct_value_count (int): - The number of distinct values. - null_value_count (int): - The number of values that are null. - valid_value_count (int): - The number of values that are valid. - """ - - float64_stats: 'Float64Stats' = proto.Field( - proto.MESSAGE, - number=3, - oneof='stats', - message='Float64Stats', - ) - string_stats: 'StringStats' = proto.Field( - proto.MESSAGE, - number=4, - oneof='stats', - message='StringStats', - ) - timestamp_stats: 'TimestampStats' = proto.Field( - proto.MESSAGE, - number=5, - oneof='stats', - message='TimestampStats', - ) - array_stats: 'ArrayStats' = proto.Field( - proto.MESSAGE, - number=6, - oneof='stats', - message='ArrayStats', - ) - struct_stats: 'StructStats' = proto.Field( - proto.MESSAGE, - number=7, - oneof='stats', - message='StructStats', - ) - category_stats: 'CategoryStats' = proto.Field( - proto.MESSAGE, - number=8, - oneof='stats', - message='CategoryStats', - ) - distinct_value_count: int = proto.Field( - proto.INT64, - number=1, - ) - null_value_count: int = proto.Field( - proto.INT64, - number=2, - ) - valid_value_count: int = proto.Field( - proto.INT64, - number=9, - ) - - -class Float64Stats(proto.Message): - r"""The data statistics of a series of FLOAT64 values. - - Attributes: - mean (float): - The mean of the series. - standard_deviation (float): - The standard deviation of the series. - quantiles (MutableSequence[float]): - Ordered from 0 to k k-quantile values of the data series of - n values. The value at index i is, approximately, the - i*n/k-th smallest value in the series; for i = 0 and i = k - these are, respectively, the min and max values. - histogram_buckets (MutableSequence[google.cloud.automl_v1beta1.types.Float64Stats.HistogramBucket]): - Histogram buckets of the data series. 
Sorted by the min - value of the bucket, ascendingly, and the number of the - buckets is dynamically generated. The buckets are - non-overlapping and completely cover whole FLOAT64 range - with min of first bucket being ``"-Infinity"``, and max of - the last one being ``"Infinity"``. - """ - - class HistogramBucket(proto.Message): - r"""A bucket of a histogram. - - Attributes: - min_ (float): - The minimum value of the bucket, inclusive. - max_ (float): - The maximum value of the bucket, exclusive unless max = - ``"Infinity"``, in which case it's inclusive. - count (int): - The number of data values that are in the - bucket, i.e. are between min and max values. - """ - - min_: float = proto.Field( - proto.DOUBLE, - number=1, - ) - max_: float = proto.Field( - proto.DOUBLE, - number=2, - ) - count: int = proto.Field( - proto.INT64, - number=3, - ) - - mean: float = proto.Field( - proto.DOUBLE, - number=1, - ) - standard_deviation: float = proto.Field( - proto.DOUBLE, - number=2, - ) - quantiles: MutableSequence[float] = proto.RepeatedField( - proto.DOUBLE, - number=3, - ) - histogram_buckets: MutableSequence[HistogramBucket] = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=HistogramBucket, - ) - - -class StringStats(proto.Message): - r"""The data statistics of a series of STRING values. - - Attributes: - top_unigram_stats (MutableSequence[google.cloud.automl_v1beta1.types.StringStats.UnigramStats]): - The statistics of the top 20 unigrams, ordered by - [count][google.cloud.automl.v1beta1.StringStats.UnigramStats.count]. - """ - - class UnigramStats(proto.Message): - r"""The statistics of a unigram. - - Attributes: - value (str): - The unigram. - count (int): - The number of occurrences of this unigram in - the series. - """ - - value: str = proto.Field( - proto.STRING, - number=1, - ) - count: int = proto.Field( - proto.INT64, - number=2, - ) - - top_unigram_stats: MutableSequence[UnigramStats] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=UnigramStats, - ) - - -class TimestampStats(proto.Message): - r"""The data statistics of a series of TIMESTAMP values. - - Attributes: - granular_stats (MutableMapping[str, google.cloud.automl_v1beta1.types.TimestampStats.GranularStats]): - The string key is the pre-defined granularity. Currently - supported: hour_of_day, day_of_week, month_of_year. - Granularities finer that the granularity of timestamp data - are not populated (e.g. if timestamps are at day - granularity, then hour_of_day is not populated). - """ - - class GranularStats(proto.Message): - r"""Stats split by a defined in context granularity. - - Attributes: - buckets (MutableMapping[int, int]): - A map from granularity key to example count for that key. - E.g. for hour_of_day ``13`` means 1pm, or for month_of_year - ``5`` means May). - """ - - buckets: MutableMapping[int, int] = proto.MapField( - proto.INT32, - proto.INT64, - number=1, - ) - - granular_stats: MutableMapping[str, GranularStats] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=GranularStats, - ) - - -class ArrayStats(proto.Message): - r"""The data statistics of a series of ARRAY values. - - Attributes: - member_stats (google.cloud.automl_v1beta1.types.DataStats): - Stats of all the values of all arrays, as if - they were a single long series of data. The type - depends on the element type of the array. 
- """ - - member_stats: 'DataStats' = proto.Field( - proto.MESSAGE, - number=2, - message='DataStats', - ) - - -class StructStats(proto.Message): - r"""The data statistics of a series of STRUCT values. - - Attributes: - field_stats (MutableMapping[str, google.cloud.automl_v1beta1.types.DataStats]): - Map from a field name of the struct to data - stats aggregated over series of all data in that - field across all the structs. - """ - - field_stats: MutableMapping[str, 'DataStats'] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message='DataStats', - ) - - -class CategoryStats(proto.Message): - r"""The data statistics of a series of CATEGORY values. - - Attributes: - top_category_stats (MutableSequence[google.cloud.automl_v1beta1.types.CategoryStats.SingleCategoryStats]): - The statistics of the top 20 CATEGORY values, ordered by - - [count][google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats.count]. - """ - - class SingleCategoryStats(proto.Message): - r"""The statistics of a single CATEGORY value. - - Attributes: - value (str): - The CATEGORY value. - count (int): - The number of occurrences of this value in - the series. - """ - - value: str = proto.Field( - proto.STRING, - number=1, - ) - count: int = proto.Field( - proto.INT64, - number=2, - ) - - top_category_stats: MutableSequence[SingleCategoryStats] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=SingleCategoryStats, - ) - - -class CorrelationStats(proto.Message): - r"""A correlation statistics between two series of DataType - values. The series may have differing DataType-s, but within a - single series the DataType must be the same. - - Attributes: - cramers_v (float): - The correlation value using the Cramer's V - measure. - """ - - cramers_v: float = proto.Field( - proto.DOUBLE, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_types.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_types.py deleted file mode 100644 index 528fcfed..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/data_types.py +++ /dev/null @@ -1,180 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'TypeCode', - 'DataType', - 'StructType', - }, -) - - -class TypeCode(proto.Enum): - r"""``TypeCode`` is used as a part of - [DataType][google.cloud.automl.v1beta1.DataType]. - - Values: - TYPE_CODE_UNSPECIFIED (0): - Not specified. Should not be used. - FLOAT64 (3): - Encoded as ``number``, or the strings ``"NaN"``, - ``"Infinity"``, or ``"-Infinity"``. - TIMESTAMP (4): - Must be between 0AD and 9999AD. 
Encoded as ``string`` - according to - [time_format][google.cloud.automl.v1beta1.DataType.time_format], - or, if that format is not set, then in RFC 3339 - ``date-time`` format, where ``time-offset`` = ``"Z"`` (e.g. - 1985-04-12T23:20:50.52Z). - STRING (6): - Encoded as ``string``. - ARRAY (8): - Encoded as ``list``, where the list elements are represented - according to - - [list_element_type][google.cloud.automl.v1beta1.DataType.list_element_type]. - STRUCT (9): - Encoded as ``struct``, where field values are represented - according to - [struct_type][google.cloud.automl.v1beta1.DataType.struct_type]. - CATEGORY (10): - Values of this type are not further understood by AutoML, - e.g. AutoML is unable to tell the order of values (as it - could with FLOAT64), or is unable to say if one value - contains another (as it could with STRING). Encoded as - ``string`` (bytes should be base64-encoded, as described in - RFC 4648, section 4). - """ - TYPE_CODE_UNSPECIFIED = 0 - FLOAT64 = 3 - TIMESTAMP = 4 - STRING = 6 - ARRAY = 8 - STRUCT = 9 - CATEGORY = 10 - - -class DataType(proto.Message): - r"""Indicated the type of data that can be stored in a structured - data entity (e.g. a table). - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - list_element_type (google.cloud.automl_v1beta1.types.DataType): - If - [type_code][google.cloud.automl.v1beta1.DataType.type_code] - == [ARRAY][google.cloud.automl.v1beta1.TypeCode.ARRAY], then - ``list_element_type`` is the type of the elements. - - This field is a member of `oneof`_ ``details``. - struct_type (google.cloud.automl_v1beta1.types.StructType): - If - [type_code][google.cloud.automl.v1beta1.DataType.type_code] - == [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT], - then ``struct_type`` provides type information for the - struct's fields. - - This field is a member of `oneof`_ ``details``. - time_format (str): - If - [type_code][google.cloud.automl.v1beta1.DataType.type_code] - == - [TIMESTAMP][google.cloud.automl.v1beta1.TypeCode.TIMESTAMP] - then ``time_format`` provides the format in which that time - field is expressed. The time_format must either be one of: - - - ``UNIX_SECONDS`` - - ``UNIX_MILLISECONDS`` - - ``UNIX_MICROSECONDS`` - - ``UNIX_NANOSECONDS`` (for respectively number of seconds, - milliseconds, microseconds and nanoseconds since start of - the Unix epoch); or be written in ``strftime`` syntax. If - time_format is not set, then the default format as - described on the type_code is used. - - This field is a member of `oneof`_ ``details``. - type_code (google.cloud.automl_v1beta1.types.TypeCode): - Required. The - [TypeCode][google.cloud.automl.v1beta1.TypeCode] for this - type. - nullable (bool): - If true, this DataType can also be ``NULL``. In .CSV files - ``NULL`` value is expressed as an empty string. 
- """ - - list_element_type: 'DataType' = proto.Field( - proto.MESSAGE, - number=2, - oneof='details', - message='DataType', - ) - struct_type: 'StructType' = proto.Field( - proto.MESSAGE, - number=3, - oneof='details', - message='StructType', - ) - time_format: str = proto.Field( - proto.STRING, - number=5, - oneof='details', - ) - type_code: 'TypeCode' = proto.Field( - proto.ENUM, - number=1, - enum='TypeCode', - ) - nullable: bool = proto.Field( - proto.BOOL, - number=4, - ) - - -class StructType(proto.Message): - r"""``StructType`` defines the DataType-s of a - [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type. - - Attributes: - fields (MutableMapping[str, google.cloud.automl_v1beta1.types.DataType]): - Unordered map of struct field names to their - data types. Fields cannot be added or removed - via Update. Their names and data types are still - mutable. - """ - - fields: MutableMapping[str, 'DataType'] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message='DataType', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/dataset.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/dataset.py deleted file mode 100644 index 8aa67e0e..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/dataset.py +++ /dev/null @@ -1,198 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import image -from google.cloud.automl_v1beta1.types import tables -from google.cloud.automl_v1beta1.types import text -from google.cloud.automl_v1beta1.types import translation -from google.cloud.automl_v1beta1.types import video -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'Dataset', - }, -) - - -class Dataset(proto.Message): - r"""A workspace for solving a single, particular machine learning - (ML) problem. A workspace contains examples that may be - annotated. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - translation_dataset_metadata (google.cloud.automl_v1beta1.types.TranslationDatasetMetadata): - Metadata for a dataset used for translation. - - This field is a member of `oneof`_ ``dataset_metadata``. - image_classification_dataset_metadata (google.cloud.automl_v1beta1.types.ImageClassificationDatasetMetadata): - Metadata for a dataset used for image - classification. - - This field is a member of `oneof`_ ``dataset_metadata``. 
- text_classification_dataset_metadata (google.cloud.automl_v1beta1.types.TextClassificationDatasetMetadata): - Metadata for a dataset used for text - classification. - - This field is a member of `oneof`_ ``dataset_metadata``. - image_object_detection_dataset_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionDatasetMetadata): - Metadata for a dataset used for image object - detection. - - This field is a member of `oneof`_ ``dataset_metadata``. - video_classification_dataset_metadata (google.cloud.automl_v1beta1.types.VideoClassificationDatasetMetadata): - Metadata for a dataset used for video - classification. - - This field is a member of `oneof`_ ``dataset_metadata``. - video_object_tracking_dataset_metadata (google.cloud.automl_v1beta1.types.VideoObjectTrackingDatasetMetadata): - Metadata for a dataset used for video object - tracking. - - This field is a member of `oneof`_ ``dataset_metadata``. - text_extraction_dataset_metadata (google.cloud.automl_v1beta1.types.TextExtractionDatasetMetadata): - Metadata for a dataset used for text - extraction. - - This field is a member of `oneof`_ ``dataset_metadata``. - text_sentiment_dataset_metadata (google.cloud.automl_v1beta1.types.TextSentimentDatasetMetadata): - Metadata for a dataset used for text - sentiment. - - This field is a member of `oneof`_ ``dataset_metadata``. - tables_dataset_metadata (google.cloud.automl_v1beta1.types.TablesDatasetMetadata): - Metadata for a dataset used for Tables. - - This field is a member of `oneof`_ ``dataset_metadata``. - name (str): - Output only. The resource name of the dataset. Form: - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`` - display_name (str): - Required. The name of the dataset to show in the interface. - The name can be up to 32 characters long and can consist - only of ASCII Latin letters A-Z and a-z, underscores (_), - and ASCII digits 0-9. - description (str): - User-provided description of the dataset. The - description can be up to 25000 characters long. - example_count (int): - Output only. The number of examples in the - dataset. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this dataset was - created. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. 
- """ - - translation_dataset_metadata: translation.TranslationDatasetMetadata = proto.Field( - proto.MESSAGE, - number=23, - oneof='dataset_metadata', - message=translation.TranslationDatasetMetadata, - ) - image_classification_dataset_metadata: image.ImageClassificationDatasetMetadata = proto.Field( - proto.MESSAGE, - number=24, - oneof='dataset_metadata', - message=image.ImageClassificationDatasetMetadata, - ) - text_classification_dataset_metadata: text.TextClassificationDatasetMetadata = proto.Field( - proto.MESSAGE, - number=25, - oneof='dataset_metadata', - message=text.TextClassificationDatasetMetadata, - ) - image_object_detection_dataset_metadata: image.ImageObjectDetectionDatasetMetadata = proto.Field( - proto.MESSAGE, - number=26, - oneof='dataset_metadata', - message=image.ImageObjectDetectionDatasetMetadata, - ) - video_classification_dataset_metadata: video.VideoClassificationDatasetMetadata = proto.Field( - proto.MESSAGE, - number=31, - oneof='dataset_metadata', - message=video.VideoClassificationDatasetMetadata, - ) - video_object_tracking_dataset_metadata: video.VideoObjectTrackingDatasetMetadata = proto.Field( - proto.MESSAGE, - number=29, - oneof='dataset_metadata', - message=video.VideoObjectTrackingDatasetMetadata, - ) - text_extraction_dataset_metadata: text.TextExtractionDatasetMetadata = proto.Field( - proto.MESSAGE, - number=28, - oneof='dataset_metadata', - message=text.TextExtractionDatasetMetadata, - ) - text_sentiment_dataset_metadata: text.TextSentimentDatasetMetadata = proto.Field( - proto.MESSAGE, - number=30, - oneof='dataset_metadata', - message=text.TextSentimentDatasetMetadata, - ) - tables_dataset_metadata: tables.TablesDatasetMetadata = proto.Field( - proto.MESSAGE, - number=33, - oneof='dataset_metadata', - message=tables.TablesDatasetMetadata, - ) - name: str = proto.Field( - proto.STRING, - number=1, - ) - display_name: str = proto.Field( - proto.STRING, - number=2, - ) - description: str = proto.Field( - proto.STRING, - number=3, - ) - example_count: int = proto.Field( - proto.INT32, - number=21, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - etag: str = proto.Field( - proto.STRING, - number=17, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/detection.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/detection.py deleted file mode 100644 index 19dfde32..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/detection.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import geometry -from google.protobuf import duration_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'ImageObjectDetectionAnnotation', - 'VideoObjectTrackingAnnotation', - 'BoundingBoxMetricsEntry', - 'ImageObjectDetectionEvaluationMetrics', - 'VideoObjectTrackingEvaluationMetrics', - }, -) - - -class ImageObjectDetectionAnnotation(proto.Message): - r"""Annotation details for image object detection. - - Attributes: - bounding_box (google.cloud.automl_v1beta1.types.BoundingPoly): - Output only. The rectangle representing the - object location. - score (float): - Output only. The confidence that this annotation is positive - for the parent example, value in [0, 1], higher means higher - positivity confidence. - """ - - bounding_box: geometry.BoundingPoly = proto.Field( - proto.MESSAGE, - number=1, - message=geometry.BoundingPoly, - ) - score: float = proto.Field( - proto.FLOAT, - number=2, - ) - - -class VideoObjectTrackingAnnotation(proto.Message): - r"""Annotation details for video object tracking. - - Attributes: - instance_id (str): - Optional. The instance of the object, - expressed as a positive integer. Used to tell - apart objects of the same type (i.e. - AnnotationSpec) when multiple are present on a - single example. - NOTE: Instance ID prediction quality is not a - part of model evaluation and is done as best - effort. Especially in cases when an entity goes - off-screen for a longer time (minutes), when it - comes back it may be given a new instance ID. - time_offset (google.protobuf.duration_pb2.Duration): - Required. A time (frame) of a video to which - this annotation pertains. Represented as the - duration since the video's start. - bounding_box (google.cloud.automl_v1beta1.types.BoundingPoly): - Required. The rectangle representing the object location on - the frame (i.e. at the time_offset of the video). - score (float): - Output only. The confidence that this annotation is positive - for the video at the time_offset, value in [0, 1], higher - means higher positivity confidence. For annotations created - by the user the score is 1. When user approves an - annotation, the original float score is kept (and not - changed to 1). - """ - - instance_id: str = proto.Field( - proto.STRING, - number=1, - ) - time_offset: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=2, - message=duration_pb2.Duration, - ) - bounding_box: geometry.BoundingPoly = proto.Field( - proto.MESSAGE, - number=3, - message=geometry.BoundingPoly, - ) - score: float = proto.Field( - proto.FLOAT, - number=4, - ) - - -class BoundingBoxMetricsEntry(proto.Message): - r"""Bounding box matching model metrics for a single - intersection-over-union threshold and multiple label match - confidence thresholds. - - Attributes: - iou_threshold (float): - Output only. The intersection-over-union - threshold value used to compute this metrics - entry. - mean_average_precision (float): - Output only. The mean average precision, most often close to - au_prc. - confidence_metrics_entries (MutableSequence[google.cloud.automl_v1beta1.types.BoundingBoxMetricsEntry.ConfidenceMetricsEntry]): - Output only. Metrics for each label-match - confidence_threshold from - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall - curve is derived from them. 
- """ - - class ConfidenceMetricsEntry(proto.Message): - r"""Metrics for a single confidence threshold. - - Attributes: - confidence_threshold (float): - Output only. The confidence threshold value - used to compute the metrics. - recall (float): - Output only. Recall under the given - confidence threshold. - precision (float): - Output only. Precision under the given - confidence threshold. - f1_score (float): - Output only. The harmonic mean of recall and - precision. - """ - - confidence_threshold: float = proto.Field( - proto.FLOAT, - number=1, - ) - recall: float = proto.Field( - proto.FLOAT, - number=2, - ) - precision: float = proto.Field( - proto.FLOAT, - number=3, - ) - f1_score: float = proto.Field( - proto.FLOAT, - number=4, - ) - - iou_threshold: float = proto.Field( - proto.FLOAT, - number=1, - ) - mean_average_precision: float = proto.Field( - proto.FLOAT, - number=2, - ) - confidence_metrics_entries: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=ConfidenceMetricsEntry, - ) - - -class ImageObjectDetectionEvaluationMetrics(proto.Message): - r"""Model evaluation metrics for image object detection problems. - Evaluates prediction quality of labeled bounding boxes. - - Attributes: - evaluated_bounding_box_count (int): - Output only. The total number of bounding - boxes (i.e. summed over all images) the ground - truth used to create this evaluation had. - bounding_box_metrics_entries (MutableSequence[google.cloud.automl_v1beta1.types.BoundingBoxMetricsEntry]): - Output only. The bounding boxes match metrics - for each Intersection-over-union threshold - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each - label confidence threshold - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. - bounding_box_mean_average_precision (float): - Output only. The single metric for bounding boxes - evaluation: the mean_average_precision averaged over all - bounding_box_metrics_entries. - """ - - evaluated_bounding_box_count: int = proto.Field( - proto.INT32, - number=1, - ) - bounding_box_metrics_entries: MutableSequence['BoundingBoxMetricsEntry'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='BoundingBoxMetricsEntry', - ) - bounding_box_mean_average_precision: float = proto.Field( - proto.FLOAT, - number=3, - ) - - -class VideoObjectTrackingEvaluationMetrics(proto.Message): - r"""Model evaluation metrics for video object tracking problems. - Evaluates prediction quality of both labeled bounding boxes and - labeled tracks (i.e. series of bounding boxes sharing same label - and instance ID). - - Attributes: - evaluated_frame_count (int): - Output only. The number of video frames used - to create this evaluation. - evaluated_bounding_box_count (int): - Output only. The total number of bounding - boxes (i.e. summed over all frames) the ground - truth used to create this evaluation had. - bounding_box_metrics_entries (MutableSequence[google.cloud.automl_v1beta1.types.BoundingBoxMetricsEntry]): - Output only. The bounding boxes match metrics - for each Intersection-over-union threshold - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each - label confidence threshold - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. - bounding_box_mean_average_precision (float): - Output only. The single metric for bounding boxes - evaluation: the mean_average_precision averaged over all - bounding_box_metrics_entries. 
- """ - - evaluated_frame_count: int = proto.Field( - proto.INT32, - number=1, - ) - evaluated_bounding_box_count: int = proto.Field( - proto.INT32, - number=2, - ) - bounding_box_metrics_entries: MutableSequence['BoundingBoxMetricsEntry'] = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='BoundingBoxMetricsEntry', - ) - bounding_box_mean_average_precision: float = proto.Field( - proto.FLOAT, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/geometry.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/geometry.py deleted file mode 100644 index 9474a410..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/geometry.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'NormalizedVertex', - 'BoundingPoly', - }, -) - - -class NormalizedVertex(proto.Message): - r"""A vertex represents a 2D point in the image. - The normalized vertex coordinates are between 0 to 1 fractions - relative to the original plane (image, video). E.g. if the plane - (e.g. whole image) would have size 10 x 20 then a point with - normalized coordinates (0.1, 0.3) would be at the position (1, - 6) on that plane. - - Attributes: - x (float): - Required. Horizontal coordinate. - y (float): - Required. Vertical coordinate. - """ - - x: float = proto.Field( - proto.FLOAT, - number=1, - ) - y: float = proto.Field( - proto.FLOAT, - number=2, - ) - - -class BoundingPoly(proto.Message): - r"""A bounding polygon of a detected object on a plane. On output both - vertices and normalized_vertices are provided. The polygon is formed - by connecting vertices in the order they are listed. - - Attributes: - normalized_vertices (MutableSequence[google.cloud.automl_v1beta1.types.NormalizedVertex]): - Output only . The bounding polygon normalized - vertices. - """ - - normalized_vertices: MutableSequence['NormalizedVertex'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='NormalizedVertex', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/image.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/image.py deleted file mode 100644 index 59c066b2..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/image.py +++ /dev/null @@ -1,304 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import classification - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'ImageClassificationDatasetMetadata', - 'ImageObjectDetectionDatasetMetadata', - 'ImageClassificationModelMetadata', - 'ImageObjectDetectionModelMetadata', - 'ImageClassificationModelDeploymentMetadata', - 'ImageObjectDetectionModelDeploymentMetadata', - }, -) - - -class ImageClassificationDatasetMetadata(proto.Message): - r"""Dataset metadata that is specific to image classification. - - Attributes: - classification_type (google.cloud.automl_v1beta1.types.ClassificationType): - Required. Type of the classification problem. - """ - - classification_type: classification.ClassificationType = proto.Field( - proto.ENUM, - number=1, - enum=classification.ClassificationType, - ) - - -class ImageObjectDetectionDatasetMetadata(proto.Message): - r"""Dataset metadata specific to image object detection. - """ - - -class ImageClassificationModelMetadata(proto.Message): - r"""Model metadata for image classification. - - Attributes: - base_model_id (str): - Optional. The ID of the ``base`` model. If it is specified, - the new model will be created based on the ``base`` model. - Otherwise, the new model will be created from scratch. The - ``base`` model must be in the same ``project`` and - ``location`` as the new model to create, and have the same - ``model_type``. - train_budget (int): - Required. The train budget of creating this model, expressed - in hours. The actual ``train_cost`` will be equal or less - than this value. - train_cost (int): - Output only. The actual train cost of creating this model, - expressed in hours. If this model is created from a ``base`` - model, the train cost used to create the ``base`` model are - not included. - stop_reason (str): - Output only. The reason that this create model operation - stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. - model_type (str): - Optional. Type of the model. The available values are: - - - ``cloud`` - Model to be used via prediction calls to - AutoML API. This is the default value. - - ``mobile-low-latency-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. Expected to have low latency, but may have - lower prediction quality than other models. - - ``mobile-versatile-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. 
- - ``mobile-high-accuracy-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. Expected to have a higher latency, but should - also have a higher prediction quality than other models. - - ``mobile-core-ml-low-latency-1`` - A model that, in - addition to providing prediction via AutoML API, can also - be exported (see - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile device with Core ML afterwards. - Expected to have low latency, but may have lower - prediction quality than other models. - - ``mobile-core-ml-versatile-1`` - A model that, in - addition to providing prediction via AutoML API, can also - be exported (see - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile device with Core ML afterwards. - - ``mobile-core-ml-high-accuracy-1`` - A model that, in - addition to providing prediction via AutoML API, can also - be exported (see - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile device with Core ML afterwards. - Expected to have a higher latency, but should also have a - higher prediction quality than other models. - node_qps (float): - Output only. An approximate number of online - prediction QPS that can be supported by this - model per each node on which it is deployed. - node_count (int): - Output only. The number of nodes this model is deployed on. - A node is an abstraction of a machine resource, which can - handle online prediction QPS as given in the node_qps field. - """ - - base_model_id: str = proto.Field( - proto.STRING, - number=1, - ) - train_budget: int = proto.Field( - proto.INT64, - number=2, - ) - train_cost: int = proto.Field( - proto.INT64, - number=3, - ) - stop_reason: str = proto.Field( - proto.STRING, - number=5, - ) - model_type: str = proto.Field( - proto.STRING, - number=7, - ) - node_qps: float = proto.Field( - proto.DOUBLE, - number=13, - ) - node_count: int = proto.Field( - proto.INT64, - number=14, - ) - - -class ImageObjectDetectionModelMetadata(proto.Message): - r"""Model metadata specific to image object detection. - - Attributes: - model_type (str): - Optional. Type of the model. The available values are: - - - ``cloud-high-accuracy-1`` - (default) A model to be used - via prediction calls to AutoML API. Expected to have a - higher latency, but should also have a higher prediction - quality than other models. - - ``cloud-low-latency-1`` - A model to be used via - prediction calls to AutoML API. Expected to have low - latency, but may have lower prediction quality than other - models. - - ``mobile-low-latency-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. Expected to have low latency, but may have - lower prediction quality than other models. - - ``mobile-versatile-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. 
- - ``mobile-high-accuracy-1`` - A model that, in addition to - providing prediction via AutoML API, can also be exported - (see - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - and used on a mobile or edge device with TensorFlow - afterwards. Expected to have a higher latency, but should - also have a higher prediction quality than other models. - node_count (int): - Output only. The number of nodes this model is deployed on. - A node is an abstraction of a machine resource, which can - handle online prediction QPS as given in the qps_per_node - field. - node_qps (float): - Output only. An approximate number of online - prediction QPS that can be supported by this - model per each node on which it is deployed. - stop_reason (str): - Output only. The reason that this create model operation - stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. - train_budget_milli_node_hours (int): - The train budget of creating this model, expressed in milli - node hours i.e. 1,000 value in this field means 1 node hour. - The actual ``train_cost`` will be equal or less than this - value. If further model training ceases to provide any - improvements, it will stop without using full budget and the - stop_reason will be ``MODEL_CONVERGED``. Note, node_hour = - actual_hour \* number_of_nodes_invovled. For model type - ``cloud-high-accuracy-1``\ (default) and - ``cloud-low-latency-1``, the train budget must be between - 20,000 and 900,000 milli node hours, inclusive. The default - value is 216, 000 which represents one day in wall time. For - model type ``mobile-low-latency-1``, ``mobile-versatile-1``, - ``mobile-high-accuracy-1``, - ``mobile-core-ml-low-latency-1``, - ``mobile-core-ml-versatile-1``, - ``mobile-core-ml-high-accuracy-1``, the train budget must be - between 1,000 and 100,000 milli node hours, inclusive. The - default value is 24, 000 which represents one day in wall - time. - train_cost_milli_node_hours (int): - Output only. The actual train cost of - creating this model, expressed in milli node - hours, i.e. 1,000 value in this field means 1 - node hour. Guaranteed to not exceed the train - budget. - """ - - model_type: str = proto.Field( - proto.STRING, - number=1, - ) - node_count: int = proto.Field( - proto.INT64, - number=3, - ) - node_qps: float = proto.Field( - proto.DOUBLE, - number=4, - ) - stop_reason: str = proto.Field( - proto.STRING, - number=5, - ) - train_budget_milli_node_hours: int = proto.Field( - proto.INT64, - number=6, - ) - train_cost_milli_node_hours: int = proto.Field( - proto.INT64, - number=7, - ) - - -class ImageClassificationModelDeploymentMetadata(proto.Message): - r"""Model deployment metadata specific to Image Classification. - - Attributes: - node_count (int): - Input only. The number of nodes to deploy the model on. A - node is an abstraction of a machine resource, which can - handle online prediction QPS as given in the model's - - [node_qps][google.cloud.automl.v1beta1.ImageClassificationModelMetadata.node_qps]. - Must be between 1 and 100, inclusive on both ends. - """ - - node_count: int = proto.Field( - proto.INT64, - number=1, - ) - - -class ImageObjectDetectionModelDeploymentMetadata(proto.Message): - r"""Model deployment metadata specific to Image Object Detection. - - Attributes: - node_count (int): - Input only. The number of nodes to deploy the model on. 
A - node is an abstraction of a machine resource, which can - handle online prediction QPS as given in the model's - - [qps_per_node][google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata.qps_per_node]. - Must be between 1 and 100, inclusive on both ends. - """ - - node_count: int = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/io.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/io.py deleted file mode 100644 index b156bc51..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/io.py +++ /dev/null @@ -1,1253 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'InputConfig', - 'BatchPredictInputConfig', - 'DocumentInputConfig', - 'OutputConfig', - 'BatchPredictOutputConfig', - 'ModelExportOutputConfig', - 'ExportEvaluatedExamplesOutputConfig', - 'GcsSource', - 'BigQuerySource', - 'GcsDestination', - 'BigQueryDestination', - 'GcrDestination', - }, -) - - -class InputConfig(proto.Message): - r"""Input configuration for ImportData Action. - - The format of input depends on dataset_metadata the Dataset into - which the import is happening has. As input source the - [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is - expected, unless specified otherwise. Additionally any input .CSV - file by itself must be 100MB or smaller, unless specified otherwise. - If an "example" file (that is, image, video etc.) with identical - content (even if it had different GCS_FILE_PATH) is mentioned - multiple times, then its label, bounding boxes etc. are appended. - The same file should be always provided with the same ML_USE and - GCS_FILE_PATH, if it is not, then these values are - nondeterministically selected from the given ones. - - The formats are represented in EBNF with commas being literal and - with non-terminal symbols defined near the end of this comment. The - formats are: - - - For Image Classification: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH,LABEL,LABEL,... GCS_FILE_PATH leads to image - of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, - .WEBP, .BMP, .TIFF, .ICO For MULTICLASS classification type, at - most one LABEL is allowed per image. If an image has not yet been - labeled, then it should be mentioned just once with no LABEL. - Some sample rows: TRAIN,gs://folder/image1.jpg,daisy - TEST,gs://folder/image2.jpg,dandelion,tulip,rose - UNASSIGNED,gs://folder/image3.jpg,daisy - UNASSIGNED,gs://folder/image4.jpg - - - For Image Object Detection: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX \| ,,,,,,,) - GCS_FILE_PATH leads to image of up to 30MB in size. 
Supported - extensions: .JPEG, .GIF, .PNG. Each image is assumed to be - exhaustively labeled. The minimum allowed BOUNDING_BOX edge - length is 0.01, and no more than 500 BOUNDING_BOX-es per image - are allowed (one BOUNDING_BOX is defined per line). If an image - has not yet been labeled, then it should be mentioned just once - with no LABEL and the ",,,,,,," in place of the BOUNDING_BOX. For - images which are known to not contain any bounding boxes, they - should be labelled explictly as "NEGATIVE_IMAGE", followed by - ",,,,,,," in place of the BOUNDING_BOX. Sample rows: - TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, - TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, - UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 - TEST,gs://folder/im3.png,,,,,,,,, - TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, - - - For Video Classification: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be - used. The GCS_FILE_PATH should lead to another .csv file which - describes examples that have given ML_USE, using the following - row format: - GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END \| ,,) - Here GCS_FILE_PATH leads to a video of up to 50GB in size and up - to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length - of the video, and end has to be after the start. Any segment of a - video which has one or more labels on it, is considered a hard - negative for all other labels. Any segment with no labels on it - is considered to be unknown. If a whole video is unknown, then it - shuold be mentioned just once with ",," in place of LABEL, - TIME_SEGMENT_START,TIME_SEGMENT_END. Sample top level CSV file: - TRAIN,gs://folder/train_videos.csv - TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file - for a particular ML_USE: - gs://folder/video1.avi,car,120,180.000021 - gs://folder/video1.avi,bike,150,180.000021 - gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, - - - For Video Object Tracking: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be - used. The GCS_FILE_PATH should lead to another .csv file which - describes examples that have given ML_USE, using one of the - following row format: - GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or - GCS_FILE_PATH,,,,,,,,,, Here GCS_FILE_PATH leads to a video of up - to 50GB in size and up to 3h duration. Supported extensions: - .MOV, .MPEG4, .MP4, .AVI. Providing INSTANCE_IDs can help to - obtain a better model. When a specific labeled entity leaves the - video frame, and shows up afterwards it is not required, albeit - preferable, that the same INSTANCE_ID is given to it. TIMESTAMP - must be within the length of the video, the BOUNDING_BOX is - assumed to be drawn on the closest video's frame to the - TIMESTAMP. Any mentioned by the TIMESTAMP frame is expected to be - exhaustively labeled and no more than 500 BOUNDING_BOX-es per - frame are allowed. If a whole video is unknown, then it should be - mentioned just once with ",,,,,,,,,," in place of LABEL, - [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. 
Sample top level CSV file: - TRAIN,gs://folder/train_videos.csv - TEST,gs://folder/test_videos.csv - UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a - CSV file for a particular ML_USE: - gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 - gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 - gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 - gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, - gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, - gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, - gs://folder/video2.avi,,,,,,,,,,, - - - For Text Extraction: CSV file(s) with each line in format: - ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .JSONL (that is, - JSON Lines) file which either imports text in-line or as - documents. Any given .JSONL file must be 100MB or smaller. The - in-line .JSONL file contains, per line, a proto that wraps a - TextSnippet proto (in json representation) followed by one or - more AnnotationPayload protos (called annotations), which have - display_name and text_extraction detail populated. The given text - is expected to be annotated exhaustively, for example, if you - look for animals and text contains "dolphin" that is not labeled, - then "dolphin" is assumed to not be an animal. Any given text - snippet content must be 10KB or smaller, and also be UTF-8 NFC - encoded (ASCII already is). The document .JSONL file contains, - per line, a proto that wraps a Document proto. The Document proto - must have either document_text or input_config set. In - document_text case, the Document proto may also contain the - spatial information of the document, including layout, document - dimension and page number. In input_config case, only PDF - documents are supported now, and each document may be up to 2MB - large. Currently, annotations on documents cannot be specified at - import. Three sample CSV rows: TRAIN,gs://folder/file1.jsonl - VALIDATE,gs://folder/file2.jsonl TEST,gs://folder/file3.jsonl - Sample in-line JSON Lines file for entity extraction (presented - here with artificial line breaks, but the only actual line break - is denoted by \\n).: { "document": { "document_text": {"content": - "dog cat"} "layout": [ { "text_segment": { "start_offset": 0, - "end_offset": 3, }, "page_number": 1, "bounding_poly": { - "normalized_vertices": [ {"x": 0.1, "y": 0.1}, {"x": 0.1, "y": - 0.3}, {"x": 0.3, "y": 0.3}, {"x": 0.3, "y": 0.1}, ], }, - "text_segment_type": TOKEN, }, { "text_segment": { - "start_offset": 4, "end_offset": 7, }, "page_number": 1, - "bounding_poly": { "normalized_vertices": [ {"x": 0.4, "y": 0.1}, - {"x": 0.4, "y": 0.3}, {"x": 0.8, "y": 0.3}, {"x": 0.8, "y": 0.1}, - ], }, "text_segment_type": TOKEN, } - - :: - - ], - "document_dimensions": { - "width": 8.27, - "height": 11.69, - "unit": INCH, - } - "page_count": 1, - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": {"text_segment": {"start_offset": 0, - "end_offset": 3}} - }, - { - "display_name": "animal", - "text_extraction": {"text_segment": {"start_offset": 4, - "end_offset": 7}} - } - ], - }\n - { - "text_snippet": { - "content": "This dog is good." 
- }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 5, "end_offset": 8} - } - } - ] - } - Sample document JSON Lines file (presented here with artificial line - breaks, but the only actual line break is denoted by \n).: - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] - } - } - } - }\n - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] - } - } - } - } - - - For Text Classification: CSV file(s) with each line in format: - ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),LABEL,LABEL,... - TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If - the column content is a valid gcs file path, i.e. prefixed by - "gs://", it will be treated as a GCS_FILE_PATH, else if the - content is enclosed within double quotes (""), it is treated as a - TEXT_SNIPPET. In the GCS_FILE_PATH case, the path must lead to a - .txt file with UTF-8 encoding, for example, - "gs://folder/content.txt", and the content in it is extracted as - a text snippet. In TEXT_SNIPPET case, the column content - excluding quotes is treated as to be imported text snippet. In - both cases, the text snippet/file size must be within 128kB. - Maximum 100 unique labels are allowed per CSV row. Sample rows: - TRAIN,"They have bad food and very rude",RudeService,BadFood - TRAIN,gs://folder/content.txt,SlowService TEST,"Typically always - bad service there.",RudeService VALIDATE,"Stomach ache to - go.",BadFood - - - For Text Sentiment: CSV file(s) with each line in format: - ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),SENTIMENT TEXT_SNIPPET and - GCS_FILE_PATH are distinguished by a pattern. If the column - content is a valid gcs file path, that is, prefixed by "gs://", - it is treated as a GCS_FILE_PATH, otherwise it is treated as a - TEXT_SNIPPET. In the GCS_FILE_PATH case, the path must lead to a - .txt file with UTF-8 encoding, for example, - "gs://folder/content.txt", and the content in it is extracted as - a text snippet. In TEXT_SNIPPET case, the column content itself - is treated as to be imported text snippet. In both cases, the - text snippet must be up to 500 characters long. Sample rows: - TRAIN,"@freewrytin this is way too good for your product",2 - TRAIN,"I need this product so bad",3 TEST,"Thank you for this - product.",4 VALIDATE,gs://folder/content.txt,2 - - - For Tables: Either - [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] - or - - [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source] - can be used. All inputs is concatenated into a single - - [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name] - For gcs_source: CSV file(s), where the first row of the first file - is the header, containing unique column names. If the first row of a - subsequent file is the same as the header, then it is also treated - as a header. All other rows contain values for the corresponding - columns. Each .CSV file by itself must be 10GB or smaller, and their - total size must be 100GB or smaller. 
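Before the Tables sample rows that follow, a small hedged sketch of producing the Text Classification import CSV described above (file name, snippets, and labels are hypothetical). Writing the lines directly keeps the quoting exactly as the documented samples show it, with only the TEXT_SNIPPET column wrapped in double quotes:

# Hypothetical rows mirroring the documented layout: ML_USE, then either a
# double-quoted TEXT_SNIPPET or a gs:// file path, then one or more labels.
lines = [
    'TRAIN,"They have bad food and very rude staff",RudeService,BadFood',
    "TRAIN,gs://my-bucket/content.txt,SlowService",
    'TEST,"Typically always bad service there.",RudeService',
]

with open("text_classification_import.csv", "w", newline="") as f:
    f.write("\n".join(lines) + "\n")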
First three sample rows of a - CSV file: "Id","First Name","Last Name","Dob","Addresses" - - "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" - - "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} - For bigquery_source: An URI of a BigQuery table. The user data size - of the BigQuery table must be 100GB or smaller. An imported table - must have between 2 and 1,000 columns, inclusive, and between 1000 - and 100,000,000 rows, inclusive. There are at most 5 import data - running in parallel. Definitions: ML_USE = "TRAIN" \| "VALIDATE" \| - "TEST" \| "UNASSIGNED" Describes how the given example (file) should - be used for model training. "UNASSIGNED" can be used when user has - no preference. GCS_FILE_PATH = A path to file on GCS, e.g. - "gs://folder/image1.png". LABEL = A display name of an object on an - image, video etc., e.g. "dog". Must be up to 32 characters long and - can consist only of ASCII Latin letters A-Z and a-z, underscores(_), - and ASCII digits 0-9. For each label an AnnotationSpec is created - which display_name becomes the label; AnnotationSpecs are given back - in predictions. INSTANCE_ID = A positive integer that identifies a - specific instance of a labeled entity on an example. Used e.g. to - track two cars on a video while being able to tell apart which one - is which. BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX \| - VERTEX,,,VERTEX,, A rectangle parallel to the frame of the example - (image, video). If 4 vertices are given they are connected by edges - in the order provided, if 2 are given they are recognized as - diagonally opposite vertices of the rectangle. VERTEX = - COORDINATE,COORDINATE First coordinate is horizontal (x), the second - is vertical (y). COORDINATE = A float in 0 to 1 range, relative to - total length of image or video in given dimension. For fractions the - leading non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is - in top left. TIME_SEGMENT_START = TIME_OFFSET Expresses a beginning, - inclusive, of a time segment within an example that has a time - dimension (e.g. video). TIME_SEGMENT_END = TIME_OFFSET Expresses an - end, exclusive, of a time segment within an example that has a time - dimension (e.g. video). TIME_OFFSET = A number of seconds as - measured from the start of an example (e.g. video). Fractions are - allowed, up to a microsecond precision. "inf" is allowed, and it - means the end of the example. TEXT_SNIPPET = A content of a text - snippet, UTF-8 encoded, enclosed within double quotes (""). - SENTIMENT = An integer between 0 and - Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). - Describes the ordinal of the sentiment - higher value means a more - positive sentiment. All the values are completely relative, i.e. - neither 0 needs to mean a negative or neutral sentiment nor - sentiment_max needs to mean a positive one - it is just required - that 0 is the least positive sentiment in the data, and - sentiment_max is the most positive one. The SENTIMENT shouldn't be - confused with "score" or "magnitude" from the previous Natural - Language Sentiment Analysis API. 
All SENTIMENT values between 0 and - sentiment_max must be represented in the imported data. On - prediction the same 0 to sentiment_max range will be used. The - difference between neighboring sentiment values needs not to be - uniform, e.g. 1 and 2 may be similar whereas the difference between - 2 and 3 may be huge. - - Errors: If any of the provided CSV files can't be parsed or if more - than certain percent of CSV rows cannot be processed then the - operation fails and nothing is imported. Regardless of overall - success or failure the per-row failures, up to a certain count cap, - is listed in Operation.metadata.partial_failures. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_source (google.cloud.automl_v1beta1.types.GcsSource): - The Google Cloud Storage location for the input content. In - ImportData, the gcs_source points to a csv with structure - described in the comment. - - This field is a member of `oneof`_ ``source``. - bigquery_source (google.cloud.automl_v1beta1.types.BigQuerySource): - The BigQuery location for the input content. - - This field is a member of `oneof`_ ``source``. - params (MutableMapping[str, str]): - Additional domain-specific parameters describing the - semantic of the imported data, any string must be up to - 25000 characters long. - - - For Tables: ``schema_inference_version`` - (integer) - Required. The version of the algorithm that should be - used for the initial inference of the schema (columns' - DataTypes) of the table the data is being imported into. - Allowed values: "1". - """ - - gcs_source: 'GcsSource' = proto.Field( - proto.MESSAGE, - number=1, - oneof='source', - message='GcsSource', - ) - bigquery_source: 'BigQuerySource' = proto.Field( - proto.MESSAGE, - number=3, - oneof='source', - message='BigQuerySource', - ) - params: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - - -class BatchPredictInputConfig(proto.Message): - r"""Input configuration for BatchPredict Action. - - The format of input depends on the ML problem of the model used for - prediction. As input source the - [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is - expected, unless specified otherwise. - - The formats are represented in EBNF with commas being literal and - with non-terminal symbols defined near the end of this comment. The - formats are: - - - For Image Classification: CSV file(s) with each line having just - a single column: GCS_FILE_PATH which leads to image of up to 30MB - in size. Supported extensions: .JPEG, .GIF, .PNG. This path is - treated as the ID in the Batch predict output. Three sample rows: - gs://folder/image1.jpeg gs://folder/image2.gif - gs://folder/image3.png - - - For Image Object Detection: CSV file(s) with each line having - just a single column: GCS_FILE_PATH which leads to image of up to - 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. This path - is treated as the ID in the Batch predict output. 
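Stepping back to the import side for a moment: the InputConfig message whose fields are defined above is what carries these CSVs into a dataset. A minimal hedged sketch of such a call using the GCS source case (project, location, dataset ID, and bucket path are hypothetical, and the types are assumed to be re-exported at the automl_v1beta1 package level as the generated library does); the batch-prediction input samples pick up again below:

from google.cloud import automl_v1beta1

# Hypothetical project, location, dataset ID, and bucket path.
client = automl_v1beta1.AutoMlClient()
dataset_name = client.dataset_path("my-project", "us-central1", "TBL0000000000")

input_config = automl_v1beta1.InputConfig(
    gcs_source=automl_v1beta1.GcsSource(
        input_uris=["gs://my-bucket/import/top_level.csv"]
    )
)

# ImportData is a long-running operation; result() blocks until it completes.
client.import_data(name=dataset_name, input_config=input_config).result(timeout=7200)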
Three sample - rows: gs://folder/image1.jpeg gs://folder/image2.gif - gs://folder/image3.png - - - For Video Classification: CSV file(s) with each line in format: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH - leads to video of up to 50GB in size and up to 3h duration. - Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length - of the video, and end has to be after the start. Three sample - rows: gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60 - gs://folder/vid2.mov,0,inf - - - For Video Object Tracking: CSV file(s) with each line in format: - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH - leads to video of up to 50GB in size and up to 3h duration. - Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length - of the video, and end has to be after the start. Three sample - rows: gs://folder/video1.mp4,10,240 - gs://folder/video1.mp4,300,360 gs://folder/vid2.mov,0,inf - - - For Text Classification: CSV file(s) with each line having just a - single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file - can have size upto 128kB. Any given text snippet content must - have 60,000 characters or less. Three sample rows: - gs://folder/text1.txt "Some text content to predict" - gs://folder/text3.pdf Supported file extensions: .txt, .pdf - - - For Text Sentiment: CSV file(s) with each line having just a - single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file - can have size upto 128kB. Any given text snippet content must - have 500 characters or less. Three sample rows: - gs://folder/text1.txt "Some text content to predict" - gs://folder/text3.pdf Supported file extensions: .txt, .pdf - - - For Text Extraction .JSONL (i.e. JSON Lines) file(s) which either - provide text in-line or as documents (for a single BatchPredict - call only one of the these formats may be used). The in-line - .JSONL file(s) contain per line a proto that wraps a temporary - user-assigned TextSnippet ID (string up to 2000 characters long) - called "id", a TextSnippet proto (in json representation) and - zero or more TextFeature protos. Any given text snippet content - must have 30,000 characters or less, and also be UTF-8 NFC - encoded (ASCII already is). The IDs provided should be unique. - The document .JSONL file(s) contain, per line, a proto that wraps - a Document proto with input_config set. Only PDF documents are - supported now, and each document must be up to 2MB large. Any - given .JSONL file must be 100MB or smaller, and no more than 20 - files may be given. 
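A small hedged sketch of producing the in-line Text Extraction .JSONL input just described, using only the standard library (the IDs and snippet contents are hypothetical); the official sample lines follow below:

import json

# Hypothetical caller-chosen IDs and snippet contents.
snippets = {
    "my_first_id": "dog car cat",
    "2": "An elaborate content",
}

# One JSON object per line, each wrapping an "id" and a TextSnippet.
with open("text_extraction_batch.jsonl", "w") as f:
    for snippet_id, content in snippets.items():
        record = {
            "id": snippet_id,
            "text_snippet": {"content": content, "mime_type": "text/plain"},
        }
        f.write(json.dumps(record) + "\n")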
Sample in-line JSON Lines file (presented - here with artificial line breaks, but the only actual line break - is denoted by \\n): { "id": "my_first_id", "text_snippet": { - "content": "dog car cat"}, "text_features": [ { "text_segment": - {"start_offset": 4, "end_offset": 6}, "structural_type": - PARAGRAPH, "bounding_poly": { "normalized_vertices": [ {"x": 0.1, - "y": 0.1}, {"x": 0.1, "y": 0.3}, {"x": 0.3, "y": 0.3}, {"x": 0.3, - "y": 0.1}, ] }, } ], }\n { "id": "2", "text_snippet": { - "content": "An elaborate content", "mime_type": "text/plain" } } - Sample document JSON Lines file (presented here with artificial - line breaks, but the only actual line break is denoted by \\n).: - { "document": { "input_config": { "gcs_source": { "input_uris": [ - "gs://folder/document1.pdf" ] } } } }\n { "document": { - "input_config": { "gcs_source": { "input_uris": [ - "gs://folder/document2.pdf" ] } } } } - - - For Tables: Either - [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] - or - - [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]. - GCS case: CSV file(s), each by itself 10GB or smaller and total size - must be 100GB or smaller, where first file must have a header - containing column names. If the first row of a subsequent file is - the same as the header, then it is also treated as a header. All - other rows contain values for the corresponding columns. The column - names must contain the model's - - [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] - - [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] - (order doesn't matter). The columns corresponding to the model's - input feature column specs must contain values compatible with the - column spec's data types. Prediction on all the rows, i.e. the CSV - lines, will be attempted. For FORECASTING - - [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: - all columns having - - [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] - type will be ignored. First three sample rows of a CSV file: "First - Name","Last Name","Dob","Addresses" - - "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" - - "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} - BigQuery case: An URI of a BigQuery table. The user data size of the - BigQuery table must be 100GB or smaller. The column names must - contain the model's - - [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] - - [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] - (order doesn't matter). The columns corresponding to the model's - input feature column specs must contain values compatible with the - column spec's data types. Prediction on all the rows of the table - will be attempted. 
For FORECASTING - - [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: - all columns having - - [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] - type will be ignored. - - Definitions: GCS_FILE_PATH = A path to file on GCS, e.g. - "gs://folder/video.avi". TEXT_SNIPPET = A content of a text snippet, - UTF-8 encoded, enclosed within double quotes ("") TIME_SEGMENT_START - = TIME_OFFSET Expresses a beginning, inclusive, of a time segment - within an example that has a time dimension (e.g. video). - TIME_SEGMENT_END = TIME_OFFSET Expresses an end, exclusive, of a - time segment within an example that has a time dimension (e.g. - video). TIME_OFFSET = A number of seconds as measured from the start - of an example (e.g. video). Fractions are allowed, up to a - microsecond precision. "inf" is allowed and it means the end of the - example. - - Errors: If any of the provided CSV files can't be parsed or if more - than certain percent of CSV rows cannot be processed then the - operation fails and prediction does not happen. Regardless of - overall success or failure the per-row failures, up to a certain - count cap, will be listed in Operation.metadata.partial_failures. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_source (google.cloud.automl_v1beta1.types.GcsSource): - The Google Cloud Storage location for the - input content. - - This field is a member of `oneof`_ ``source``. - bigquery_source (google.cloud.automl_v1beta1.types.BigQuerySource): - The BigQuery location for the input content. - - This field is a member of `oneof`_ ``source``. - """ - - gcs_source: 'GcsSource' = proto.Field( - proto.MESSAGE, - number=1, - oneof='source', - message='GcsSource', - ) - bigquery_source: 'BigQuerySource' = proto.Field( - proto.MESSAGE, - number=2, - oneof='source', - message='BigQuerySource', - ) - - -class DocumentInputConfig(proto.Message): - r"""Input configuration of a - [Document][google.cloud.automl.v1beta1.Document]. - - Attributes: - gcs_source (google.cloud.automl_v1beta1.types.GcsSource): - The Google Cloud Storage location of the - document file. Only a single path should be - given. Max supported size: 512MB. - Supported extensions: .PDF. - """ - - gcs_source: 'GcsSource' = proto.Field( - proto.MESSAGE, - number=1, - message='GcsSource', - ) - - -class OutputConfig(proto.Message): - r"""- For Translation: CSV file ``translation.csv``, with each line in - format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file - which describes examples that have given ML_USE, using the - following row format per line: TEXT_SNIPPET (in source language) - \\t TEXT_SNIPPET (in target language) - - - For Tables: Output depends on whether the dataset was imported - from GCS or BigQuery. GCS case: - - [gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destination] - must be set. Exported are CSV file(s) ``tables_1.csv``, - ``tables_2.csv``,...,\ ``tables_N.csv`` with each having as header - line the table's column names, and all other lines contain values - for the header columns. 
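Either destination is handed to the ExportData call. A minimal hedged sketch of the GCS case just described (project, location, dataset ID, and output bucket are hypothetical); the BigQuery variant, described next, only swaps the destination field:

from google.cloud import automl_v1beta1

# Hypothetical project, location, dataset ID, and output bucket.
client = automl_v1beta1.AutoMlClient()
dataset_name = client.dataset_path("my-project", "us-central1", "TBL0000000000")

output_config = automl_v1beta1.OutputConfig(
    gcs_destination=automl_v1beta1.GcsDestination(
        output_uri_prefix="gs://my-bucket/exports/"
    )
)

# ExportData is a long-running operation.
client.export_data(name=dataset_name, output_config=output_config).result(timeout=7200)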
BigQuery case: - - [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given project a - new dataset will be created with name - - ``export_data__`` - where will be made BigQuery-dataset-name compatible (e.g. most - special characters will become underscores), and timestamp will be - in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that - dataset a new table called ``primary_table`` will be created, and - filled with precisely the same data as this obtained on import. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination): - The Google Cloud Storage location where the output is to be - written to. For Image Object Detection, Text Extraction, - Video Classification and Tables, in the given directory a - new directory will be created with name: export_data-- where - timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. - All export output will be written into that directory. - - This field is a member of `oneof`_ ``destination``. - bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination): - The BigQuery location where the output is to - be written to. - - This field is a member of `oneof`_ ``destination``. - """ - - gcs_destination: 'GcsDestination' = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message='GcsDestination', - ) - bigquery_destination: 'BigQueryDestination' = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message='BigQueryDestination', - ) - - -class BatchPredictOutputConfig(proto.Message): - r"""Output configuration for BatchPredict Action. - - As destination the - - [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] - must be set unless specified otherwise for a domain. If - gcs_destination is set then in the given directory a new directory - is created. Its name will be "prediction--", where timestamp is in - YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends - on the ML problem the predictions are made for. - - - For Image Classification: In the created directory files - ``image_classification_1.jsonl``, - ``image_classification_2.jsonl``,...,\ ``image_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total - number of the successfully predicted images and annotations. A - single image will be listed only once with all its annotations, - and its annotations will never be split across files. Each .JSONL - file will contain, per line, a JSON representation of a proto - that wraps image's "ID" : "" followed by a list of zero - or more AnnotationPayload protos (called annotations), which have - classification detail populated. If prediction for any image - failed (partially or completely), then an additional - ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` - files will be created (N depends on total number of failed - predictions). 
These files will have a JSON representation of a - proto that wraps the same "ID" : "" but here followed - by exactly one - - [``google.rpc.Status``](https: - //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) - containing only ``code`` and ``message``\ fields. - - - For Image Object Detection: In the created directory files - ``image_object_detection_1.jsonl``, - ``image_object_detection_2.jsonl``,...,\ ``image_object_detection_N.jsonl`` - will be created, where N may be 1, and depends on the total - number of the successfully predicted images and annotations. Each - .JSONL file will contain, per line, a JSON representation of a - proto that wraps image's "ID" : "" followed by a list - of zero or more AnnotationPayload protos (called annotations), - which have image_object_detection detail populated. A single - image will be listed only once with all its annotations, and its - annotations will never be split across files. If prediction for - any image failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` - files will be created (N depends on total number of failed - predictions). These files will have a JSON representation of a - proto that wraps the same "ID" : "" but here followed - by exactly one - - [``google.rpc.Status``](https: - //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) - containing only ``code`` and ``message``\ fields. - - - For Video Classification: In the created directory a - video_classification.csv file, and a .JSON file per each video - classification requested in the input (i.e. each line in given - CSV(s)), will be created. - - :: - - The format of video_classification.csv is: - - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS - where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 - to 1 the prediction input lines (i.e. video_classification.csv has - precisely the same number of lines as the prediction input had.) - JSON_FILE_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. STATUS = - "OK" if prediction completed successfully, or an error code with - message otherwise. If STATUS is not "OK" then the .JSON file for - that line may not exist or be empty. - - :: - - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for the video time segment the file is assigned to in the - video_classification.csv. All AnnotationPayload protos will have - video_classification field set, and will be sorted by - video_classification.type field (note that the returned types are - governed by `classifaction_types` parameter in - [PredictService.BatchPredictRequest.params][]). - - - For Video Object Tracking: In the created directory a - video_object_tracking.csv file will be created, and multiple - files video_object_trackinng_1.json, - video_object_trackinng_2.json,..., video_object_trackinng_N.json, - where N is the number of requests in the input (i.e. the number - of lines in given CSV(s)). - - :: - - The format of video_object_tracking.csv is: - - GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS - where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 - to 1 the prediction input lines (i.e. video_object_tracking.csv has - precisely the same number of lines as the prediction input had.) 
- JSON_FILE_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. STATUS = - "OK" if prediction completed successfully, or an error code with - message otherwise. If STATUS is not "OK" then the .JSON file for - that line may not exist or be empty. - - :: - - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for each frame of the video time segment the file is assigned to in - video_object_tracking.csv. All AnnotationPayload protos will have - video_object_tracking field set. - - - For Text Classification: In the created directory files - ``text_classification_1.jsonl``, - ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total - number of inputs and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text snippet or input text file and a list of - zero or more AnnotationPayload protos (called annotations), which - have classification detail populated. A single text snippet or file - will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any text snippet or file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text snippet or input text file followed by - exactly one - - [``google.rpc.Status``](https: - //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) - containing only ``code`` and ``message``. - - - For Text Sentiment: In the created directory files - ``text_sentiment_1.jsonl``, - ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will - be created, where N may be 1, and depends on the total number of - inputs and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text snippet or input text file and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_sentiment detail populated. A single text snippet or file - will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any text snippet or file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text snippet or input text file followed by - exactly one - - [``google.rpc.Status``](https: - //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) - containing only ``code`` and ``message``. - - - For Text Extraction: In the created directory files - ``text_extraction_1.jsonl``, - ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` - will be created, where N may be 1, and depends on the total - number of inputs and annotations found. The contents of these - .JSONL file(s) depend on whether the input used inline text, or - documents. 
If input was inline, then each .JSONL file will - contain, per line, a JSON representation of a proto that wraps - given in request text snippet's "id" (if specified), followed by - input text snippet, and a list of zero or more AnnotationPayload - protos (called annotations), which have text_extraction detail - populated. A single text snippet will be listed only once with - all its annotations, and its annotations will never be split - across files. If input used documents, then each .JSONL file will - contain, per line, a JSON representation of a proto that wraps - given in request document proto, followed by its OCR-ed - representation in the form of a text snippet, finally followed by - a list of zero or more AnnotationPayload protos (called - annotations), which have text_extraction detail populated and - refer, via their indices, to the OCR-ed text snippet. A single - document (and its text snippet) will be listed only once with all - its annotations, and its annotations will never be split across - files. If prediction for any text snippet failed (partially or - completely), then additional ``errors_1.jsonl``, - ``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created - (N depends on total number of failed predictions). These files - will have a JSON representation of a proto that wraps either the - "id" : "" (in case of inline) or the document proto (in - case of document) but here followed by exactly one - - [``google.rpc.Status``](https: - //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) - containing only ``code`` and ``message``. - - - For Tables: Output depends on whether - - [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] - or - - [bigquery_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination] - is set (either is allowed). GCS case: In the created directory files - ``tables_1.csv``, ``tables_2.csv``,..., ``tables_N.csv`` will be - created, where N may be 1, and depends on the total number of the - successfully predicted rows. For all CLASSIFICATION - - [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: - Each .csv file will contain a header, listing all columns' - - [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] - given on input followed by M target column names in the format of - - "<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - - [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>\_\_score" - where M is the number of distinct target values, i.e. number of - distinct values in the target column of the table used to train the - model. Subsequent lines will contain the respective values of - successfully predicted rows, with the last, i.e. the target, columns - having the corresponding prediction - [scores][google.cloud.automl.v1beta1.TablesAnnotation.score]. 
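All of these per-domain outputs are produced by one BatchPredict call that pairs a BatchPredictInputConfig with this BatchPredictOutputConfig. A minimal hedged sketch of the GCS-to-GCS case (model ID and bucket paths are hypothetical); the REGRESSION and FORECASTING column layout continues below:

from google.cloud import automl_v1beta1

# Hypothetical project, location, model ID, and bucket paths.
prediction_client = automl_v1beta1.PredictionServiceClient()
model_name = "projects/my-project/locations/us-central1/models/TBL1111111111"

input_config = automl_v1beta1.BatchPredictInputConfig(
    gcs_source=automl_v1beta1.GcsSource(
        input_uris=["gs://my-bucket/batch/rows.csv"]
    )
)
output_config = automl_v1beta1.BatchPredictOutputConfig(
    gcs_destination=automl_v1beta1.GcsDestination(
        output_uri_prefix="gs://my-bucket/batch/results/"
    )
)

# BatchPredict is a long-running operation; its files appear under a new
# "prediction-..." directory inside the output prefix, as described above.
prediction_client.batch_predict(
    name=model_name,
    input_config=input_config,
    output_config=output_config,
    params={},
).result(timeout=7200)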
For - REGRESSION and FORECASTING - - [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: - Each .csv file will contain a header, listing all columns' - [display_name-s][google.cloud.automl.v1beta1.display_name] given on - input followed by the predicted target column with name in the - format of - - "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - - [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" - Subsequent lines will contain the respective values of successfully - predicted rows, with the last, i.e. the target, column having the - predicted target value. If prediction for any rows failed, then an - additional ``errors_1.csv``, ``errors_2.csv``,..., ``errors_N.csv`` - will be created (N depends on total number of failed rows). These - files will have analogous format as ``tables_*.csv``, but always - with a single target column having - - [``google.rpc.Status``](https: - //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) - represented as a JSON string, and containing only ``code`` and - ``message``. BigQuery case: - - [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given project a - new dataset will be created with name - ``prediction__`` - where will be made BigQuery-dataset-name compatible (e.g. most - special characters will become underscores), and timestamp will be - in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the - dataset two tables will be created, ``predictions``, and ``errors``. - The ``predictions`` table's column names will be the input columns' - - [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] - followed by the target column with name in the format of - - "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - - [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" - The input feature columns will contain the respective values of - successfully predicted rows, with the target column having an ARRAY - of - - [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], - represented as STRUCT-s, containing - [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. - The ``errors`` table contains rows for which the prediction has - failed, it has analogous input columns while the target column name - is in the format of - - "errors_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - - [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>", - and as a value has - - [``google.rpc.Status``](https: - //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) - represented as a STRUCT, and containing only ``code`` and - ``message``. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination): - The Google Cloud Storage location of the - directory where the output is to be written to. - - This field is a member of `oneof`_ ``destination``. 
- bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination): - The BigQuery location where the output is to - be written to. - - This field is a member of `oneof`_ ``destination``. - """ - - gcs_destination: 'GcsDestination' = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message='GcsDestination', - ) - bigquery_destination: 'BigQueryDestination' = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message='BigQueryDestination', - ) - - -class ModelExportOutputConfig(proto.Message): - r"""Output configuration for ModelExport Action. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination): - The Google Cloud Storage location where the model is to be - written to. This location may only be set for the following - model formats: "tflite", "edgetpu_tflite", "tf_saved_model", - "tf_js", "core_ml". - - Under the directory given as the destination a new one with - name "model-export--", where timestamp is in - YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. - Inside the model and any of its supporting files will be - written. - - This field is a member of `oneof`_ ``destination``. - gcr_destination (google.cloud.automl_v1beta1.types.GcrDestination): - The GCR location where model image is to be - pushed to. This location may only be set for the - following model formats: - - "docker". - - The model image will be created under the given - URI. - - This field is a member of `oneof`_ ``destination``. - model_format (str): - The format in which the model must be exported. The - available, and default, formats depend on the problem and - model type (if given problem and type combination doesn't - have a format listed, it means its models are not - exportable): - - - For Image Classification mobile-low-latency-1, - mobile-versatile-1, mobile-high-accuracy-1: "tflite" - (default), "edgetpu_tflite", "tf_saved_model", "tf_js", - "docker". - - - For Image Classification mobile-core-ml-low-latency-1, - mobile-core-ml-versatile-1, - mobile-core-ml-high-accuracy-1: "core_ml" (default). - - - For Image Object Detection mobile-low-latency-1, - mobile-versatile-1, mobile-high-accuracy-1: "tflite", - "tf_saved_model", "tf_js". - - - For Video Classification cloud, "tf_saved_model". - - - For Video Object Tracking cloud, "tf_saved_model". - - - For Video Object Tracking mobile-versatile-1: "tflite", - "edgetpu_tflite", "tf_saved_model", "docker". - - - For Video Object Tracking mobile-coral-versatile-1: - "tflite", "edgetpu_tflite", "docker". - - - For Video Object Tracking mobile-coral-low-latency-1: - "tflite", "edgetpu_tflite", "docker". - - - For Video Object Tracking mobile-jetson-versatile-1: - "tf_saved_model", "docker". - - - For Tables: "docker". - - Formats description: - - - tflite - Used for Android mobile devices. - - edgetpu_tflite - Used for `Edge - TPU `__ devices. - - tf_saved_model - A tensorflow model in SavedModel format. - - tf_js - A - `TensorFlow.js `__ model - that can be used in the browser and in Node.js using - JavaScript. - - docker - Used for Docker containers. Use the params field - to customize the container. 
The container is verified to - work correctly on ubuntu 16.04 operating system. See more - at [containers - - quickstart](https: - //cloud.google.com/vision/automl/docs/containers-gcs-quickstart) - - - core_ml - Used for iOS mobile devices. - params (MutableMapping[str, str]): - Additional model-type and format specific parameters - describing the requirements for the to be exported model - files, any string must be up to 25000 characters long. - - - For ``docker`` format: ``cpu_architecture`` - (string) - "x86_64" (default). ``gpu_architecture`` - (string) - "none" (default), "nvidia". - """ - - gcs_destination: 'GcsDestination' = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message='GcsDestination', - ) - gcr_destination: 'GcrDestination' = proto.Field( - proto.MESSAGE, - number=3, - oneof='destination', - message='GcrDestination', - ) - model_format: str = proto.Field( - proto.STRING, - number=4, - ) - params: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - - -class ExportEvaluatedExamplesOutputConfig(proto.Message): - r"""Output configuration for ExportEvaluatedExamples Action. Note that - this call is available only for 30 days since the moment the model - was evaluated. The output depends on the domain, as follows (note - that only examples from the TEST set are exported): - - - For Tables: - - [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] - pointing to a BigQuery project must be set. In the given project a - new dataset will be created with name - - ``export_evaluated_examples__`` - where will be made BigQuery-dataset-name compatible (e.g. most - special characters will become underscores), and timestamp will be - in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the - dataset an ``evaluated_examples`` table will be created. It will - have all the same columns as the - - [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id] - of the [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from - which the model was created, as they were at the moment of model's - evaluation (this includes the target column with its ground truth), - followed by a column called "predicted_". That last - column will contain the model's prediction result for each - respective row, given as ARRAY of - [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], - represented as STRUCT-s, containing - [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination): - The BigQuery location where the output is to - be written to. - - This field is a member of `oneof`_ ``destination``. - """ - - bigquery_destination: 'BigQueryDestination' = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message='BigQueryDestination', - ) - - -class GcsSource(proto.Message): - r"""The Google Cloud Storage location for the input content. - - Attributes: - input_uris (MutableSequence[str]): - Required. Google Cloud Storage URIs to input files, up to - 2000 characters long. Accepted forms: - - - Full object path, e.g. gs://bucket/directory/object.csv - """ - - input_uris: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class BigQuerySource(proto.Message): - r"""The BigQuery location for the input content. 
- - Attributes: - input_uri (str): - Required. BigQuery URI to a table, up to 2000 characters - long. Accepted forms: - - - BigQuery path e.g. bq://projectId.bqDatasetId.bqTableId - """ - - input_uri: str = proto.Field( - proto.STRING, - number=1, - ) - - -class GcsDestination(proto.Message): - r"""The Google Cloud Storage location where the output is to be - written to. - - Attributes: - output_uri_prefix (str): - Required. Google Cloud Storage URI to output directory, up - to 2000 characters long. Accepted forms: - - - Prefix path: gs://bucket/directory The requesting user - must have write permission to the bucket. The directory - is created if it doesn't exist. - """ - - output_uri_prefix: str = proto.Field( - proto.STRING, - number=1, - ) - - -class BigQueryDestination(proto.Message): - r"""The BigQuery location for the output content. - - Attributes: - output_uri (str): - Required. BigQuery URI to a project, up to 2000 characters - long. Accepted forms: - - - BigQuery path e.g. bq://projectId - """ - - output_uri: str = proto.Field( - proto.STRING, - number=1, - ) - - -class GcrDestination(proto.Message): - r"""The GCR location where the image must be pushed to. - - Attributes: - output_uri (str): - Required. Google Contained Registry URI of the new image, up - to 2000 characters long. See - - https: //cloud.google.com/container-registry/do // - cs/pushing-and-pulling#pushing_an_image_to_a_registry - Accepted forms: - - - [HOSTNAME]/[PROJECT-ID]/[IMAGE] - - [HOSTNAME]/[PROJECT-ID]/[IMAGE]:[TAG] - - The requesting user must have permission to push images the - project. - """ - - output_uri: str = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model.py deleted file mode 100644 index f83543cb..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model.py +++ /dev/null @@ -1,208 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import image -from google.cloud.automl_v1beta1.types import tables -from google.cloud.automl_v1beta1.types import text -from google.cloud.automl_v1beta1.types import translation -from google.cloud.automl_v1beta1.types import video -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'Model', - }, -) - - -class Model(proto.Message): - r"""API proto representing a trained machine learning model. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - translation_model_metadata (google.cloud.automl_v1beta1.types.TranslationModelMetadata): - Metadata for translation models. - - This field is a member of `oneof`_ ``model_metadata``. - image_classification_model_metadata (google.cloud.automl_v1beta1.types.ImageClassificationModelMetadata): - Metadata for image classification models. - - This field is a member of `oneof`_ ``model_metadata``. - text_classification_model_metadata (google.cloud.automl_v1beta1.types.TextClassificationModelMetadata): - Metadata for text classification models. - - This field is a member of `oneof`_ ``model_metadata``. - image_object_detection_model_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionModelMetadata): - Metadata for image object detection models. - - This field is a member of `oneof`_ ``model_metadata``. - video_classification_model_metadata (google.cloud.automl_v1beta1.types.VideoClassificationModelMetadata): - Metadata for video classification models. - - This field is a member of `oneof`_ ``model_metadata``. - video_object_tracking_model_metadata (google.cloud.automl_v1beta1.types.VideoObjectTrackingModelMetadata): - Metadata for video object tracking models. - - This field is a member of `oneof`_ ``model_metadata``. - text_extraction_model_metadata (google.cloud.automl_v1beta1.types.TextExtractionModelMetadata): - Metadata for text extraction models. - - This field is a member of `oneof`_ ``model_metadata``. - tables_model_metadata (google.cloud.automl_v1beta1.types.TablesModelMetadata): - Metadata for Tables models. - - This field is a member of `oneof`_ ``model_metadata``. - text_sentiment_model_metadata (google.cloud.automl_v1beta1.types.TextSentimentModelMetadata): - Metadata for text sentiment models. - - This field is a member of `oneof`_ ``model_metadata``. - name (str): - Output only. Resource name of the model. Format: - ``projects/{project_id}/locations/{location_id}/models/{model_id}`` - display_name (str): - Required. The name of the model to show in the interface. - The name can be up to 32 characters long and can consist - only of ASCII Latin letters A-Z and a-z, underscores (_), - and ASCII digits 0-9. It must start with a letter. - dataset_id (str): - Required. The resource ID of the dataset used - to create the model. The dataset must come from - the same ancestor project and location. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the model - training finished and can be used for - prediction. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this model was - last updated. - deployment_state (google.cloud.automl_v1beta1.types.Model.DeploymentState): - Output only. Deployment state of the model. A - model can only serve prediction requests after - it gets deployed. - """ - class DeploymentState(proto.Enum): - r"""Deployment state of the model. - - Values: - DEPLOYMENT_STATE_UNSPECIFIED (0): - Should not be used, an un-set enum has this - value by default. - DEPLOYED (1): - Model is deployed. - UNDEPLOYED (2): - Model is not deployed. 
- """ - DEPLOYMENT_STATE_UNSPECIFIED = 0 - DEPLOYED = 1 - UNDEPLOYED = 2 - - translation_model_metadata: translation.TranslationModelMetadata = proto.Field( - proto.MESSAGE, - number=15, - oneof='model_metadata', - message=translation.TranslationModelMetadata, - ) - image_classification_model_metadata: image.ImageClassificationModelMetadata = proto.Field( - proto.MESSAGE, - number=13, - oneof='model_metadata', - message=image.ImageClassificationModelMetadata, - ) - text_classification_model_metadata: text.TextClassificationModelMetadata = proto.Field( - proto.MESSAGE, - number=14, - oneof='model_metadata', - message=text.TextClassificationModelMetadata, - ) - image_object_detection_model_metadata: image.ImageObjectDetectionModelMetadata = proto.Field( - proto.MESSAGE, - number=20, - oneof='model_metadata', - message=image.ImageObjectDetectionModelMetadata, - ) - video_classification_model_metadata: video.VideoClassificationModelMetadata = proto.Field( - proto.MESSAGE, - number=23, - oneof='model_metadata', - message=video.VideoClassificationModelMetadata, - ) - video_object_tracking_model_metadata: video.VideoObjectTrackingModelMetadata = proto.Field( - proto.MESSAGE, - number=21, - oneof='model_metadata', - message=video.VideoObjectTrackingModelMetadata, - ) - text_extraction_model_metadata: text.TextExtractionModelMetadata = proto.Field( - proto.MESSAGE, - number=19, - oneof='model_metadata', - message=text.TextExtractionModelMetadata, - ) - tables_model_metadata: tables.TablesModelMetadata = proto.Field( - proto.MESSAGE, - number=24, - oneof='model_metadata', - message=tables.TablesModelMetadata, - ) - text_sentiment_model_metadata: text.TextSentimentModelMetadata = proto.Field( - proto.MESSAGE, - number=22, - oneof='model_metadata', - message=text.TextSentimentModelMetadata, - ) - name: str = proto.Field( - proto.STRING, - number=1, - ) - display_name: str = proto.Field( - proto.STRING, - number=2, - ) - dataset_id: str = proto.Field( - proto.STRING, - number=3, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - update_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - deployment_state: DeploymentState = proto.Field( - proto.ENUM, - number=8, - enum=DeploymentState, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model_evaluation.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model_evaluation.py deleted file mode 100644 index f195068a..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/model_evaluation.py +++ /dev/null @@ -1,196 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import classification -from google.cloud.automl_v1beta1.types import detection -from google.cloud.automl_v1beta1.types import regression -from google.cloud.automl_v1beta1.types import text_extraction -from google.cloud.automl_v1beta1.types import text_sentiment -from google.cloud.automl_v1beta1.types import translation -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'ModelEvaluation', - }, -) - - -class ModelEvaluation(proto.Message): - r"""Evaluation results of a model. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - classification_evaluation_metrics (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics): - Model evaluation metrics for image, text, - video and tables classification. - Tables problem is considered a classification - when the target column is CATEGORY DataType. - - This field is a member of `oneof`_ ``metrics``. - regression_evaluation_metrics (google.cloud.automl_v1beta1.types.RegressionEvaluationMetrics): - Model evaluation metrics for Tables - regression. Tables problem is considered a - regression when the target column has FLOAT64 - DataType. - - This field is a member of `oneof`_ ``metrics``. - translation_evaluation_metrics (google.cloud.automl_v1beta1.types.TranslationEvaluationMetrics): - Model evaluation metrics for translation. - - This field is a member of `oneof`_ ``metrics``. - image_object_detection_evaluation_metrics (google.cloud.automl_v1beta1.types.ImageObjectDetectionEvaluationMetrics): - Model evaluation metrics for image object - detection. - - This field is a member of `oneof`_ ``metrics``. - video_object_tracking_evaluation_metrics (google.cloud.automl_v1beta1.types.VideoObjectTrackingEvaluationMetrics): - Model evaluation metrics for video object - tracking. - - This field is a member of `oneof`_ ``metrics``. - text_sentiment_evaluation_metrics (google.cloud.automl_v1beta1.types.TextSentimentEvaluationMetrics): - Evaluation metrics for text sentiment models. - - This field is a member of `oneof`_ ``metrics``. - text_extraction_evaluation_metrics (google.cloud.automl_v1beta1.types.TextExtractionEvaluationMetrics): - Evaluation metrics for text extraction - models. - - This field is a member of `oneof`_ ``metrics``. - name (str): - Output only. Resource name of the model evaluation. Format: - - ``projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}`` - annotation_spec_id (str): - Output only. The ID of the annotation spec that the model - evaluation applies to. The The ID is empty for the overall - model evaluation. For Tables annotation specs in the dataset - do not exist and this ID is always not set, but for - CLASSIFICATION - - [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type] - the - [display_name][google.cloud.automl.v1beta1.ModelEvaluation.display_name] - field is used. - display_name (str): - Output only. 
The value of - [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name] - at the moment when the model was trained. Because this field - returns a value at model training time, for different models - trained from the same dataset, the values may differ, since - display names could had been changed between the two model's - trainings. For Tables CLASSIFICATION - - [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type] - distinct values of the target column at the moment of the - model evaluation are populated here. The display_name is - empty for the overall model evaluation. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this model - evaluation was created. - evaluated_example_count (int): - Output only. The number of examples used for model - evaluation, i.e. for which ground truth from time of model - creation is compared against the predicted annotations - created by the model. For overall ModelEvaluation (i.e. with - annotation_spec_id not set) this is the total number of all - examples used for evaluation. Otherwise, this is the count - of examples that according to the ground truth were - annotated by the - - [annotation_spec_id][google.cloud.automl.v1beta1.ModelEvaluation.annotation_spec_id]. - """ - - classification_evaluation_metrics: classification.ClassificationEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=8, - oneof='metrics', - message=classification.ClassificationEvaluationMetrics, - ) - regression_evaluation_metrics: regression.RegressionEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=24, - oneof='metrics', - message=regression.RegressionEvaluationMetrics, - ) - translation_evaluation_metrics: translation.TranslationEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=9, - oneof='metrics', - message=translation.TranslationEvaluationMetrics, - ) - image_object_detection_evaluation_metrics: detection.ImageObjectDetectionEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=12, - oneof='metrics', - message=detection.ImageObjectDetectionEvaluationMetrics, - ) - video_object_tracking_evaluation_metrics: detection.VideoObjectTrackingEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=14, - oneof='metrics', - message=detection.VideoObjectTrackingEvaluationMetrics, - ) - text_sentiment_evaluation_metrics: text_sentiment.TextSentimentEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=11, - oneof='metrics', - message=text_sentiment.TextSentimentEvaluationMetrics, - ) - text_extraction_evaluation_metrics: text_extraction.TextExtractionEvaluationMetrics = proto.Field( - proto.MESSAGE, - number=13, - oneof='metrics', - message=text_extraction.TextExtractionEvaluationMetrics, - ) - name: str = proto.Field( - proto.STRING, - number=1, - ) - annotation_spec_id: str = proto.Field( - proto.STRING, - number=2, - ) - display_name: str = proto.Field( - proto.STRING, - number=15, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - evaluated_example_count: int = proto.Field( - proto.INT32, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/operations.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/operations.py deleted file mode 100644 index 8916452c..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/operations.py +++ /dev/null @@ -1,392 +0,0 @@ -# -*- coding: utf-8 
-*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import io -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'OperationMetadata', - 'DeleteOperationMetadata', - 'DeployModelOperationMetadata', - 'UndeployModelOperationMetadata', - 'CreateModelOperationMetadata', - 'ImportDataOperationMetadata', - 'ExportDataOperationMetadata', - 'BatchPredictOperationMetadata', - 'ExportModelOperationMetadata', - 'ExportEvaluatedExamplesOperationMetadata', - }, -) - - -class OperationMetadata(proto.Message): - r"""Metadata used across all long running operations returned by - AutoML API. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - delete_details (google.cloud.automl_v1beta1.types.DeleteOperationMetadata): - Details of a Delete operation. - - This field is a member of `oneof`_ ``details``. - deploy_model_details (google.cloud.automl_v1beta1.types.DeployModelOperationMetadata): - Details of a DeployModel operation. - - This field is a member of `oneof`_ ``details``. - undeploy_model_details (google.cloud.automl_v1beta1.types.UndeployModelOperationMetadata): - Details of an UndeployModel operation. - - This field is a member of `oneof`_ ``details``. - create_model_details (google.cloud.automl_v1beta1.types.CreateModelOperationMetadata): - Details of CreateModel operation. - - This field is a member of `oneof`_ ``details``. - import_data_details (google.cloud.automl_v1beta1.types.ImportDataOperationMetadata): - Details of ImportData operation. - - This field is a member of `oneof`_ ``details``. - batch_predict_details (google.cloud.automl_v1beta1.types.BatchPredictOperationMetadata): - Details of BatchPredict operation. - - This field is a member of `oneof`_ ``details``. - export_data_details (google.cloud.automl_v1beta1.types.ExportDataOperationMetadata): - Details of ExportData operation. - - This field is a member of `oneof`_ ``details``. - export_model_details (google.cloud.automl_v1beta1.types.ExportModelOperationMetadata): - Details of ExportModel operation. - - This field is a member of `oneof`_ ``details``. - export_evaluated_examples_details (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOperationMetadata): - Details of ExportEvaluatedExamples operation. - - This field is a member of `oneof`_ ``details``. - progress_percent (int): - Output only. Progress of operation. Range: [0, 100]. Not - used currently. 
- partial_failures (MutableSequence[google.rpc.status_pb2.Status]): - Output only. Partial failures encountered. - E.g. single files that couldn't be read. - This field should never exceed 20 entries. - Status details field will contain standard GCP - error details. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the operation was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the operation was - updated for the last time. - """ - - delete_details: 'DeleteOperationMetadata' = proto.Field( - proto.MESSAGE, - number=8, - oneof='details', - message='DeleteOperationMetadata', - ) - deploy_model_details: 'DeployModelOperationMetadata' = proto.Field( - proto.MESSAGE, - number=24, - oneof='details', - message='DeployModelOperationMetadata', - ) - undeploy_model_details: 'UndeployModelOperationMetadata' = proto.Field( - proto.MESSAGE, - number=25, - oneof='details', - message='UndeployModelOperationMetadata', - ) - create_model_details: 'CreateModelOperationMetadata' = proto.Field( - proto.MESSAGE, - number=10, - oneof='details', - message='CreateModelOperationMetadata', - ) - import_data_details: 'ImportDataOperationMetadata' = proto.Field( - proto.MESSAGE, - number=15, - oneof='details', - message='ImportDataOperationMetadata', - ) - batch_predict_details: 'BatchPredictOperationMetadata' = proto.Field( - proto.MESSAGE, - number=16, - oneof='details', - message='BatchPredictOperationMetadata', - ) - export_data_details: 'ExportDataOperationMetadata' = proto.Field( - proto.MESSAGE, - number=21, - oneof='details', - message='ExportDataOperationMetadata', - ) - export_model_details: 'ExportModelOperationMetadata' = proto.Field( - proto.MESSAGE, - number=22, - oneof='details', - message='ExportModelOperationMetadata', - ) - export_evaluated_examples_details: 'ExportEvaluatedExamplesOperationMetadata' = proto.Field( - proto.MESSAGE, - number=26, - oneof='details', - message='ExportEvaluatedExamplesOperationMetadata', - ) - progress_percent: int = proto.Field( - proto.INT32, - number=13, - ) - partial_failures: MutableSequence[status_pb2.Status] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=status_pb2.Status, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteOperationMetadata(proto.Message): - r"""Details of operations that perform deletes of any entities. - """ - - -class DeployModelOperationMetadata(proto.Message): - r"""Details of DeployModel operation. - """ - - -class UndeployModelOperationMetadata(proto.Message): - r"""Details of UndeployModel operation. - """ - - -class CreateModelOperationMetadata(proto.Message): - r"""Details of CreateModel operation. - """ - - -class ImportDataOperationMetadata(proto.Message): - r"""Details of ImportData operation. - """ - - -class ExportDataOperationMetadata(proto.Message): - r"""Details of ExportData operation. - - Attributes: - output_info (google.cloud.automl_v1beta1.types.ExportDataOperationMetadata.ExportDataOutputInfo): - Output only. Information further describing - this export data's output. - """ - - class ExportDataOutputInfo(proto.Message): - r"""Further describes this export data's output. Supplements - [OutputConfig][google.cloud.automl.v1beta1.OutputConfig]. - - This message has `oneof`_ fields (mutually exclusive fields). 
- For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_output_directory (str): - The full path of the Google Cloud Storage - directory created, into which the exported data - is written. - - This field is a member of `oneof`_ ``output_location``. - bigquery_output_dataset (str): - The path of the BigQuery dataset created, in - bq://projectId.bqDatasetId format, into which - the exported data is written. - - This field is a member of `oneof`_ ``output_location``. - """ - - gcs_output_directory: str = proto.Field( - proto.STRING, - number=1, - oneof='output_location', - ) - bigquery_output_dataset: str = proto.Field( - proto.STRING, - number=2, - oneof='output_location', - ) - - output_info: ExportDataOutputInfo = proto.Field( - proto.MESSAGE, - number=1, - message=ExportDataOutputInfo, - ) - - -class BatchPredictOperationMetadata(proto.Message): - r"""Details of BatchPredict operation. - - Attributes: - input_config (google.cloud.automl_v1beta1.types.BatchPredictInputConfig): - Output only. The input config that was given - upon starting this batch predict operation. - output_info (google.cloud.automl_v1beta1.types.BatchPredictOperationMetadata.BatchPredictOutputInfo): - Output only. Information further describing - this batch predict's output. - """ - - class BatchPredictOutputInfo(proto.Message): - r"""Further describes this batch predict's output. Supplements - - [BatchPredictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_output_directory (str): - The full path of the Google Cloud Storage - directory created, into which the prediction - output is written. - - This field is a member of `oneof`_ ``output_location``. - bigquery_output_dataset (str): - The path of the BigQuery dataset created, in - bq://projectId.bqDatasetId format, into which - the prediction output is written. - - This field is a member of `oneof`_ ``output_location``. - """ - - gcs_output_directory: str = proto.Field( - proto.STRING, - number=1, - oneof='output_location', - ) - bigquery_output_dataset: str = proto.Field( - proto.STRING, - number=2, - oneof='output_location', - ) - - input_config: io.BatchPredictInputConfig = proto.Field( - proto.MESSAGE, - number=1, - message=io.BatchPredictInputConfig, - ) - output_info: BatchPredictOutputInfo = proto.Field( - proto.MESSAGE, - number=2, - message=BatchPredictOutputInfo, - ) - - -class ExportModelOperationMetadata(proto.Message): - r"""Details of ExportModel operation. - - Attributes: - output_info (google.cloud.automl_v1beta1.types.ExportModelOperationMetadata.ExportModelOutputInfo): - Output only. Information further describing - the output of this model export. - """ - - class ExportModelOutputInfo(proto.Message): - r"""Further describes the output of model export. Supplements - - [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. 
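For orientation (this note and the sketch below are not part of the generated sources being removed): the operation metadata messages above surface through the long-running operations returned by the corresponding AutoMl calls. A minimal sketch against the v1beta1 surface, assuming an existing model resource name; the bucket path and the "tflite" format value are placeholders/assumptions.

# Illustrative sketch only, not part of the removed sources.
# The model name, bucket path and "tflite" format value are assumptions.
from google.cloud import automl_v1beta1

def export_model_and_watch(model_name: str) -> None:
    client = automl_v1beta1.AutoMlClient()
    operation = client.export_model(
        request=automl_v1beta1.ExportModelRequest(
            name=model_name,
            output_config=automl_v1beta1.ModelExportOutputConfig(
                gcs_destination=automl_v1beta1.GcsDestination(
                    output_uri_prefix="gs://my-bucket/exported-model/",  # placeholder
                ),
                model_format="tflite",  # assumed format value
            ),
        )
    )
    # operation.metadata deserializes to the OperationMetadata message above;
    # for this call its export_model_details member is populated.
    print("Progress:", operation.metadata.progress_percent)
    operation.result()  # blocks until the export completes
    details = operation.metadata.export_model_details
    print("Exported to:", details.output_info.gcs_output_directory)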
- - Attributes: - gcs_output_directory (str): - The full path of the Google Cloud Storage - directory created, into which the model will be - exported. - """ - - gcs_output_directory: str = proto.Field( - proto.STRING, - number=1, - ) - - output_info: ExportModelOutputInfo = proto.Field( - proto.MESSAGE, - number=2, - message=ExportModelOutputInfo, - ) - - -class ExportEvaluatedExamplesOperationMetadata(proto.Message): - r"""Details of EvaluatedExamples operation. - - Attributes: - output_info (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo): - Output only. Information further describing - the output of this evaluated examples export. - """ - - class ExportEvaluatedExamplesOutputInfo(proto.Message): - r"""Further describes the output of the evaluated examples export. - Supplements - - [ExportEvaluatedExamplesOutputConfig][google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig]. - - Attributes: - bigquery_output_dataset (str): - The path of the BigQuery dataset created, in - bq://projectId.bqDatasetId format, into which - the output of export evaluated examples is - written. - """ - - bigquery_output_dataset: str = proto.Field( - proto.STRING, - number=2, - ) - - output_info: ExportEvaluatedExamplesOutputInfo = proto.Field( - proto.MESSAGE, - number=2, - message=ExportEvaluatedExamplesOutputInfo, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/prediction_service.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/prediction_service.py deleted file mode 100644 index d4a9abaa..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/prediction_service.py +++ /dev/null @@ -1,285 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import annotation_payload -from google.cloud.automl_v1beta1.types import data_items -from google.cloud.automl_v1beta1.types import io - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'PredictRequest', - 'PredictResponse', - 'BatchPredictRequest', - 'BatchPredictResult', - }, -) - - -class PredictRequest(proto.Message): - r"""Request message for - [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. - - Attributes: - name (str): - Required. Name of the model requested to - serve the prediction. - payload (google.cloud.automl_v1beta1.types.ExamplePayload): - Required. Payload to perform a prediction on. - The payload must match the problem type that the - model was trained to solve. - params (MutableMapping[str, str]): - Additional domain-specific parameters, any string must be up - to 25000 characters long. - - - For Image Classification: - - ``score_threshold`` - (float) A value from 0.0 to 1.0. 
- When the model makes predictions for an image, it will - only produce results that have at least this confidence - score. The default is 0.5. - - - For Image Object Detection: ``score_threshold`` - (float) - When Model detects objects on the image, it will only - produce bounding boxes which have at least this - confidence score. Value in 0 to 1 range, default is 0.5. - ``max_bounding_box_count`` - (int64) No more than this - number of bounding boxes will be returned in the - response. Default is 100, the requested value may be - limited by server. - - - For Tables: feature_importance - (boolean) Whether - feature importance should be populated in the returned - TablesAnnotation. The default is false. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - payload: data_items.ExamplePayload = proto.Field( - proto.MESSAGE, - number=2, - message=data_items.ExamplePayload, - ) - params: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=3, - ) - - -class PredictResponse(proto.Message): - r"""Response message for - [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. - - Attributes: - payload (MutableSequence[google.cloud.automl_v1beta1.types.AnnotationPayload]): - Prediction result. - Translation and Text Sentiment will return - precisely one payload. - preprocessed_input (google.cloud.automl_v1beta1.types.ExamplePayload): - The preprocessed example that AutoML actually makes - prediction on. Empty if AutoML does not preprocess the input - example. - - - For Text Extraction: If the input is a .pdf file, the - OCR'ed text will be provided in - [document_text][google.cloud.automl.v1beta1.Document.document_text]. - metadata (MutableMapping[str, str]): - Additional domain-specific prediction response metadata. - - - For Image Object Detection: ``max_bounding_box_count`` - - (int64) At most that many bounding boxes per image could - have been returned. - - - For Text Sentiment: ``sentiment_score`` - (float, - deprecated) A value between -1 and 1, -1 maps to least - positive sentiment, while 1 maps to the most positive one - and the higher the score, the more positive the sentiment - in the document is. Yet these values are relative to the - training data, so e.g. if all data was positive then -1 - will be also positive (though the least). The - sentiment_score shouldn't be confused with "score" or - "magnitude" from the previous Natural Language Sentiment - Analysis API. - """ - - payload: MutableSequence[annotation_payload.AnnotationPayload] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=annotation_payload.AnnotationPayload, - ) - preprocessed_input: data_items.ExamplePayload = proto.Field( - proto.MESSAGE, - number=3, - message=data_items.ExamplePayload, - ) - metadata: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - - -class BatchPredictRequest(proto.Message): - r"""Request message for - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. - - Attributes: - name (str): - Required. Name of the model requested to - serve the batch prediction. - input_config (google.cloud.automl_v1beta1.types.BatchPredictInputConfig): - Required. The input configuration for batch - prediction. - output_config (google.cloud.automl_v1beta1.types.BatchPredictOutputConfig): - Required. The Configuration specifying where - output predictions should be written. - params (MutableMapping[str, str]): - Required. 
Additional domain-specific parameters for the - predictions, any string must be up to 25000 characters long. - - - For Text Classification: - - ``score_threshold`` - (float) A value from 0.0 to 1.0. - When the model makes predictions for a text snippet, it - will only produce results that have at least this - confidence score. The default is 0.5. - - - For Image Classification: - - ``score_threshold`` - (float) A value from 0.0 to 1.0. - When the model makes predictions for an image, it will - only produce results that have at least this confidence - score. The default is 0.5. - - - For Image Object Detection: - - ``score_threshold`` - (float) When Model detects objects - on the image, it will only produce bounding boxes which - have at least this confidence score. Value in 0 to 1 - range, default is 0.5. ``max_bounding_box_count`` - - (int64) No more than this number of bounding boxes will - be produced per image. Default is 100, the requested - value may be limited by server. - - - For Video Classification : - - ``score_threshold`` - (float) A value from 0.0 to 1.0. - When the model makes predictions for a video, it will - only produce results that have at least this confidence - score. The default is 0.5. ``segment_classification`` - - (boolean) Set to true to request segment-level - classification. AutoML Video Intelligence returns labels - and their confidence scores for the entire segment of the - video that user specified in the request configuration. - The default is "true". ``shot_classification`` - - (boolean) Set to true to request shot-level - classification. AutoML Video Intelligence determines the - boundaries for each camera shot in the entire segment of - the video that user specified in the request - configuration. AutoML Video Intelligence then returns - labels and their confidence scores for each detected - shot, along with the start and end time of the shot. - WARNING: Model evaluation is not done for this - classification type, the quality of it depends on - training data, but there are no metrics provided to - describe that quality. The default is "false". - ``1s_interval_classification`` - (boolean) Set to true to - request classification for a video at one-second - intervals. AutoML Video Intelligence returns labels and - their confidence scores for each second of the entire - segment of the video that user specified in the request - configuration. WARNING: Model evaluation is not done for - this classification type, the quality of it depends on - training data, but there are no metrics provided to - describe that quality. The default is "false". - - - For Tables: - - feature_importance - (boolean) Whether feature importance - should be populated in the returned TablesAnnotations. - The default is false. - - - For Video Object Tracking: - - ``score_threshold`` - (float) When Model detects objects - on video frames, it will only produce bounding boxes - which have at least this confidence score. Value in 0 to - 1 range, default is 0.5. ``max_bounding_box_count`` - - (int64) No more than this number of bounding boxes will - be returned per frame. Default is 100, the requested - value may be limited by server. ``min_bounding_box_size`` - - (float) Only bounding boxes with shortest edge at least - that long as a relative value of video frame size will be - returned. Value in 0 to 1 range. Default is 0. 
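The ``params`` map described above is purely string-to-string, so numeric and boolean options are passed in string form. A minimal illustrative sketch of a batch prediction request (not part of this patch), assuming an image object detection model and placeholder Cloud Storage paths.

# Illustrative sketch only, not part of the removed sources.
# Assumes google-cloud-automl is installed and the model/GCS paths exist.
from google.cloud import automl_v1beta1

def run_batch_predict(project_id: str, model_id: str) -> None:
    client = automl_v1beta1.PredictionServiceClient()
    model_name = f"projects/{project_id}/locations/us-central1/models/{model_id}"

    request = automl_v1beta1.BatchPredictRequest(
        name=model_name,
        input_config=automl_v1beta1.BatchPredictInputConfig(
            gcs_source=automl_v1beta1.GcsSource(
                input_uris=["gs://my-bucket/batch_input.csv"],  # placeholder path
            ),
        ),
        output_config=automl_v1beta1.BatchPredictOutputConfig(
            gcs_destination=automl_v1beta1.GcsDestination(
                output_uri_prefix="gs://my-bucket/batch_output/",  # placeholder path
            ),
        ),
        # All params are strings, as documented in the docstring above.
        params={"score_threshold": "0.8", "max_bounding_box_count": "50"},
    )

    operation = client.batch_predict(request=request)
    result = operation.result(timeout=3600)  # BatchPredictResult
    print("Batch predict finished:", dict(result.metadata))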
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - input_config: io.BatchPredictInputConfig = proto.Field( - proto.MESSAGE, - number=3, - message=io.BatchPredictInputConfig, - ) - output_config: io.BatchPredictOutputConfig = proto.Field( - proto.MESSAGE, - number=4, - message=io.BatchPredictOutputConfig, - ) - params: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - - -class BatchPredictResult(proto.Message): - r"""Result of the Batch Predict. This message is returned in - [response][google.longrunning.Operation.response] of the operation - returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. - - Attributes: - metadata (MutableMapping[str, str]): - Additional domain-specific prediction response metadata. - - - For Image Object Detection: ``max_bounding_box_count`` - - (int64) At most that many bounding boxes per image could - have been returned. - - - For Video Object Tracking: ``max_bounding_box_count`` - - (int64) At most that many bounding boxes per frame could - have been returned. - """ - - metadata: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/ranges.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/ranges.py deleted file mode 100644 index 262e14b0..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/ranges.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'DoubleRange', - }, -) - - -class DoubleRange(proto.Message): - r"""A range between two double numbers. - - Attributes: - start (float): - Start of the range, inclusive. - end (float): - End of the range, exclusive. - """ - - start: float = proto.Field( - proto.DOUBLE, - number=1, - ) - end: float = proto.Field( - proto.DOUBLE, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/regression.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/regression.py deleted file mode 100644 index 123eda6c..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/regression.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'RegressionEvaluationMetrics', - }, -) - - -class RegressionEvaluationMetrics(proto.Message): - r"""Metrics for regression problems. - - Attributes: - root_mean_squared_error (float): - Output only. Root Mean Squared Error (RMSE). - mean_absolute_error (float): - Output only. Mean Absolute Error (MAE). - mean_absolute_percentage_error (float): - Output only. Mean absolute percentage error. - Only set if all ground truth values are are - positive. - r_squared (float): - Output only. R squared. - root_mean_squared_log_error (float): - Output only. Root mean squared log error. - """ - - root_mean_squared_error: float = proto.Field( - proto.FLOAT, - number=1, - ) - mean_absolute_error: float = proto.Field( - proto.FLOAT, - number=2, - ) - mean_absolute_percentage_error: float = proto.Field( - proto.FLOAT, - number=3, - ) - r_squared: float = proto.Field( - proto.FLOAT, - number=4, - ) - root_mean_squared_log_error: float = proto.Field( - proto.FLOAT, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/service.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/service.py deleted file mode 100644 index 28dd4971..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/service.py +++ /dev/null @@ -1,874 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
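For context, the RegressionEvaluationMetrics message above is read from a ModelEvaluation's ``regression_evaluation_metrics`` oneof member. A minimal illustrative sketch (not part of the removed sources), assuming an existing regression model evaluation resource name.

# Illustrative sketch only; assumes a real model evaluation resource name.
from google.cloud import automl_v1beta1

def show_regression_metrics(evaluation_name: str) -> None:
    client = automl_v1beta1.AutoMlClient()
    evaluation = client.get_model_evaluation(
        request=automl_v1beta1.GetModelEvaluationRequest(name=evaluation_name)
    )
    # The metrics oneof member matching the model's problem type is set.
    metrics = evaluation.regression_evaluation_metrics
    print("RMSE:", metrics.root_mean_squared_error)
    print("MAE:", metrics.mean_absolute_error)
    print("R^2:", metrics.r_squared)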
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec -from google.cloud.automl_v1beta1.types import dataset as gca_dataset -from google.cloud.automl_v1beta1.types import image -from google.cloud.automl_v1beta1.types import io -from google.cloud.automl_v1beta1.types import model as gca_model -from google.cloud.automl_v1beta1.types import model_evaluation as gca_model_evaluation -from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'CreateDatasetRequest', - 'GetDatasetRequest', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'UpdateDatasetRequest', - 'DeleteDatasetRequest', - 'ImportDataRequest', - 'ExportDataRequest', - 'GetAnnotationSpecRequest', - 'GetTableSpecRequest', - 'ListTableSpecsRequest', - 'ListTableSpecsResponse', - 'UpdateTableSpecRequest', - 'GetColumnSpecRequest', - 'ListColumnSpecsRequest', - 'ListColumnSpecsResponse', - 'UpdateColumnSpecRequest', - 'CreateModelRequest', - 'GetModelRequest', - 'ListModelsRequest', - 'ListModelsResponse', - 'DeleteModelRequest', - 'DeployModelRequest', - 'UndeployModelRequest', - 'ExportModelRequest', - 'ExportEvaluatedExamplesRequest', - 'GetModelEvaluationRequest', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - }, -) - - -class CreateDatasetRequest(proto.Message): - r"""Request message for - [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. - - Attributes: - parent (str): - Required. The resource name of the project to - create the dataset for. - dataset (google.cloud.automl_v1beta1.types.Dataset): - Required. The dataset to create. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - dataset: gca_dataset.Dataset = proto.Field( - proto.MESSAGE, - number=2, - message=gca_dataset.Dataset, - ) - - -class GetDatasetRequest(proto.Message): - r"""Request message for - [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. - - Attributes: - name (str): - Required. The resource name of the dataset to - retrieve. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListDatasetsRequest(proto.Message): - r"""Request message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - - Attributes: - parent (str): - Required. The resource name of the project - from which to list datasets. - filter (str): - An expression for filtering the results of the request. - - - ``dataset_metadata`` - for existence of the case (e.g. - ``image_classification_dataset_metadata:*``). Some - examples of using the filter are: - - - ``translation_dataset_metadata:*`` --> The dataset has - ``translation_dataset_metadata``. - page_size (int): - Requested page size. Server may return fewer - results than requested. If unspecified, server - will pick a default size. - page_token (str): - A token identifying a page of results for the server to - return Typically obtained via - [ListDatasetsResponse.next_page_token][google.cloud.automl.v1beta1.ListDatasetsResponse.next_page_token] - of the previous - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets] - call. 
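An illustrative sketch of the filter and paging behavior described above (not part of this patch; the project ID and location are placeholders). The generated pager follows ``next_page_token`` automatically.

# Illustrative sketch only; the project ID and location are placeholders.
from google.cloud import automl_v1beta1

def list_translation_datasets(project_id: str) -> None:
    client = automl_v1beta1.AutoMlClient()
    parent = f"projects/{project_id}/locations/us-central1"
    # The pager handles page_token / next_page_token transparently.
    pager = client.list_datasets(
        request=automl_v1beta1.ListDatasetsRequest(
            parent=parent,
            filter="translation_dataset_metadata:*",  # filter syntax from the docstring above
            page_size=50,
        )
    )
    for dataset in pager:
        print(dataset.name, dataset.display_name)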
- """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - filter: str = proto.Field( - proto.STRING, - number=3, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDatasetsResponse(proto.Message): - r"""Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - - Attributes: - datasets (MutableSequence[google.cloud.automl_v1beta1.types.Dataset]): - The datasets read. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListDatasetsRequest.page_token][google.cloud.automl.v1beta1.ListDatasetsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - datasets: MutableSequence[gca_dataset.Dataset] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateDatasetRequest(proto.Message): - r"""Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] - - Attributes: - dataset (google.cloud.automl_v1beta1.types.Dataset): - Required. The dataset which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - The update mask applies to the resource. - """ - - dataset: gca_dataset.Dataset = proto.Field( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteDatasetRequest(proto.Message): - r"""Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. - - Attributes: - name (str): - Required. The resource name of the dataset to - delete. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ImportDataRequest(proto.Message): - r"""Request message for - [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. - - Attributes: - name (str): - Required. Dataset name. Dataset must already - exist. All imported annotations and examples - will be added. - input_config (google.cloud.automl_v1beta1.types.InputConfig): - Required. The desired input location and its - domain specific semantics, if any. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - input_config: io.InputConfig = proto.Field( - proto.MESSAGE, - number=3, - message=io.InputConfig, - ) - - -class ExportDataRequest(proto.Message): - r"""Request message for - [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. - - Attributes: - name (str): - Required. The resource name of the dataset. - output_config (google.cloud.automl_v1beta1.types.OutputConfig): - Required. The desired output location. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - output_config: io.OutputConfig = proto.Field( - proto.MESSAGE, - number=3, - message=io.OutputConfig, - ) - - -class GetAnnotationSpecRequest(proto.Message): - r"""Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. - - Attributes: - name (str): - Required. The resource name of the annotation - spec to retrieve. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class GetTableSpecRequest(proto.Message): - r"""Request message for - [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. - - Attributes: - name (str): - Required. 
The resource name of the table spec - to retrieve. - field_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - field_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class ListTableSpecsRequest(proto.Message): - r"""Request message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - - Attributes: - parent (str): - Required. The resource name of the dataset to - list table specs from. - field_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - filter (str): - Filter expression, see go/filtering. - page_size (int): - Requested page size. The server can return - fewer results than requested. If unspecified, - the server will pick a default size. - page_token (str): - A token identifying a page of results for the server to - return. Typically obtained from the - [ListTableSpecsResponse.next_page_token][google.cloud.automl.v1beta1.ListTableSpecsResponse.next_page_token] - field of the previous - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs] - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - field_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - filter: str = proto.Field( - proto.STRING, - number=3, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=6, - ) - - -class ListTableSpecsResponse(proto.Message): - r"""Response message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - - Attributes: - table_specs (MutableSequence[google.cloud.automl_v1beta1.types.TableSpec]): - The table specs read. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListTableSpecsRequest.page_token][google.cloud.automl.v1beta1.ListTableSpecsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - table_specs: MutableSequence[gca_table_spec.TableSpec] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_table_spec.TableSpec, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateTableSpecRequest(proto.Message): - r"""Request message for - [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] - - Attributes: - table_spec (google.cloud.automl_v1beta1.types.TableSpec): - Required. The table spec which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - The update mask applies to the resource. - """ - - table_spec: gca_table_spec.TableSpec = proto.Field( - proto.MESSAGE, - number=1, - message=gca_table_spec.TableSpec, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class GetColumnSpecRequest(proto.Message): - r"""Request message for - [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. - - Attributes: - name (str): - Required. The resource name of the column - spec to retrieve. - field_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
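To illustrate how the ``field_mask`` documented on these table/column spec requests is supplied (a sketch only, not part of the removed sources; the dataset name and the mask paths are assumptions).

# Illustrative sketch only; assumes an existing AutoML Tables dataset.
from google.cloud import automl_v1beta1
from google.protobuf import field_mask_pb2

def list_column_names(dataset_name: str) -> None:
    client = automl_v1beta1.AutoMlClient()
    # Restrict the response to a few fields; these mask paths are assumptions.
    mask = field_mask_pb2.FieldMask(paths=["name", "display_name"])
    for table_spec in client.list_table_specs(
        request=automl_v1beta1.ListTableSpecsRequest(parent=dataset_name)
    ):
        for column_spec in client.list_column_specs(
            request=automl_v1beta1.ListColumnSpecsRequest(
                parent=table_spec.name,
                field_mask=mask,
            )
        ):
            print(column_spec.display_name)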
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - field_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class ListColumnSpecsRequest(proto.Message): - r"""Request message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - - Attributes: - parent (str): - Required. The resource name of the table spec - to list column specs from. - field_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - filter (str): - Filter expression, see go/filtering. - page_size (int): - Requested page size. The server can return - fewer results than requested. If unspecified, - the server will pick a default size. - page_token (str): - A token identifying a page of results for the server to - return. Typically obtained from the - [ListColumnSpecsResponse.next_page_token][google.cloud.automl.v1beta1.ListColumnSpecsResponse.next_page_token] - field of the previous - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs] - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - field_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - filter: str = proto.Field( - proto.STRING, - number=3, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=6, - ) - - -class ListColumnSpecsResponse(proto.Message): - r"""Response message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - - Attributes: - column_specs (MutableSequence[google.cloud.automl_v1beta1.types.ColumnSpec]): - The column specs read. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListColumnSpecsRequest.page_token][google.cloud.automl.v1beta1.ListColumnSpecsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - column_specs: MutableSequence[gca_column_spec.ColumnSpec] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_column_spec.ColumnSpec, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateColumnSpecRequest(proto.Message): - r"""Request message for - [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] - - Attributes: - column_spec (google.cloud.automl_v1beta1.types.ColumnSpec): - Required. The column spec which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - The update mask applies to the resource. - """ - - column_spec: gca_column_spec.ColumnSpec = proto.Field( - proto.MESSAGE, - number=1, - message=gca_column_spec.ColumnSpec, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class CreateModelRequest(proto.Message): - r"""Request message for - [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. - - Attributes: - parent (str): - Required. Resource name of the parent project - where the model is being created. - model (google.cloud.automl_v1beta1.types.Model): - Required. The model to create. 
- """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - model: gca_model.Model = proto.Field( - proto.MESSAGE, - number=4, - message=gca_model.Model, - ) - - -class GetModelRequest(proto.Message): - r"""Request message for - [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. - - Attributes: - name (str): - Required. Resource name of the model. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelsRequest(proto.Message): - r"""Request message for - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - - Attributes: - parent (str): - Required. Resource name of the project, from - which to list the models. - filter (str): - An expression for filtering the results of the request. - - - ``model_metadata`` - for existence of the case (e.g. - ``video_classification_model_metadata:*``). - - - ``dataset_id`` - for = or !=. Some examples of using the - filter are: - - - ``image_classification_model_metadata:*`` --> The model - has ``image_classification_model_metadata``. - - - ``dataset_id=5`` --> The model was created from a dataset - with ID 5. - page_size (int): - Requested page size. - page_token (str): - A token identifying a page of results for the server to - return Typically obtained via - [ListModelsResponse.next_page_token][google.cloud.automl.v1beta1.ListModelsResponse.next_page_token] - of the previous - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels] - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - filter: str = proto.Field( - proto.STRING, - number=3, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=6, - ) - - -class ListModelsResponse(proto.Message): - r"""Response message for - [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - - Attributes: - model (MutableSequence[google.cloud.automl_v1beta1.types.Model]): - List of models in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelsRequest.page_token][google.cloud.automl.v1beta1.ListModelsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - model: MutableSequence[gca_model.Model] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteModelRequest(proto.Message): - r"""Request message for - [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. - - Attributes: - name (str): - Required. Resource name of the model being - deleted. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class DeployModelRequest(proto.Message): - r"""Request message for - [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - image_object_detection_model_deployment_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata): - Model deployment metadata specific to Image - Object Detection. - - This field is a member of `oneof`_ ``model_deployment_metadata``. 
- image_classification_model_deployment_metadata (google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata): - Model deployment metadata specific to Image - Classification. - - This field is a member of `oneof`_ ``model_deployment_metadata``. - name (str): - Required. Resource name of the model to - deploy. - """ - - image_object_detection_model_deployment_metadata: image.ImageObjectDetectionModelDeploymentMetadata = proto.Field( - proto.MESSAGE, - number=2, - oneof='model_deployment_metadata', - message=image.ImageObjectDetectionModelDeploymentMetadata, - ) - image_classification_model_deployment_metadata: image.ImageClassificationModelDeploymentMetadata = proto.Field( - proto.MESSAGE, - number=4, - oneof='model_deployment_metadata', - message=image.ImageClassificationModelDeploymentMetadata, - ) - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class UndeployModelRequest(proto.Message): - r"""Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. - - Attributes: - name (str): - Required. Resource name of the model to - undeploy. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ExportModelRequest(proto.Message): - r"""Request message for - [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an error code - will be returned. - - Attributes: - name (str): - Required. The resource name of the model to - export. - output_config (google.cloud.automl_v1beta1.types.ModelExportOutputConfig): - Required. The desired output location and - configuration. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - output_config: io.ModelExportOutputConfig = proto.Field( - proto.MESSAGE, - number=3, - message=io.ModelExportOutputConfig, - ) - - -class ExportEvaluatedExamplesRequest(proto.Message): - r"""Request message for - [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. - - Attributes: - name (str): - Required. The resource name of the model - whose evaluated examples are to be exported. - output_config (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig): - Required. The desired output location and - configuration. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - output_config: io.ExportEvaluatedExamplesOutputConfig = proto.Field( - proto.MESSAGE, - number=3, - message=io.ExportEvaluatedExamplesOutputConfig, - ) - - -class GetModelEvaluationRequest(proto.Message): - r"""Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. - - Attributes: - name (str): - Required. Resource name for the model - evaluation. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelEvaluationsRequest(proto.Message): - r"""Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - - Attributes: - parent (str): - Required. Resource name of the model to list - the model evaluations for. If modelId is set as - "-", this will list model evaluations from - across all models of the parent location. - filter (str): - An expression for filtering the results of the request. - - - ``annotation_spec_id`` - for =, != or existence. See - example below for the last. - - Some examples of using the filter are: - - - ``annotation_spec_id!=4`` --> The model evaluation was - done for annotation spec with ID different than 4. 
- - ``NOT annotation_spec_id:*`` --> The model evaluation was - done for aggregate of all annotation specs. - page_size (int): - Requested page size. - page_token (str): - A token identifying a page of results for the server to - return. Typically obtained via - [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1beta1.ListModelEvaluationsResponse.next_page_token] - of the previous - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations] - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - filter: str = proto.Field( - proto.STRING, - number=3, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=6, - ) - - -class ListModelEvaluationsResponse(proto.Message): - r"""Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - - Attributes: - model_evaluation (MutableSequence[google.cloud.automl_v1beta1.types.ModelEvaluation]): - List of model evaluations in the requested - page. - next_page_token (str): - A token to retrieve next page of results. Pass to the - [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1beta1.ListModelEvaluationsRequest.page_token] - field of a new - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations] - request to obtain that page. - """ - - @property - def raw_page(self): - return self - - model_evaluation: MutableSequence[gca_model_evaluation.ModelEvaluation] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model_evaluation.ModelEvaluation, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/table_spec.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/table_spec.py deleted file mode 100644 index 52789421..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/table_spec.py +++ /dev/null @@ -1,111 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import io - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'TableSpec', - }, -) - - -class TableSpec(proto.Message): - r"""A specification of a relational table. The table's schema is - represented via its child column specs. It is pre-populated as part - of ImportData by schema inference algorithm, the version of which is - a required parameter of ImportData InputConfig. Note: While working - with a table, at times the schema may be inconsistent with the data - in the table (e.g. string in a FLOAT64 column). The consistency - validation is done upon creation of a model. Used by: - - - Tables - - Attributes: - name (str): - Output only. 
The resource name of the table spec. Form: - - ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}`` - time_column_spec_id (str): - column_spec_id of the time column. Only used if the parent - dataset's ml_use_column_spec_id is not set. Used to split - rows into TRAIN, VALIDATE and TEST sets such that oldest - rows go to TRAIN set, newest to TEST, and those in between - to VALIDATE. Required type: TIMESTAMP. If both this column - and ml_use_column are not set, then ML use of all rows will - be assigned by AutoML. NOTE: Updates of this field will - instantly affect any other users concurrently working with - the dataset. - row_count (int): - Output only. The number of rows (i.e. - examples) in the table. - valid_row_count (int): - Output only. The number of valid rows (i.e. - without values that don't match DataType-s of - their columns). - column_count (int): - Output only. The number of columns of the - table. That is, the number of child - ColumnSpec-s. - input_configs (MutableSequence[google.cloud.automl_v1beta1.types.InputConfig]): - Output only. Input configs via which data - currently residing in the table had been - imported. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - time_column_spec_id: str = proto.Field( - proto.STRING, - number=2, - ) - row_count: int = proto.Field( - proto.INT64, - number=3, - ) - valid_row_count: int = proto.Field( - proto.INT64, - number=4, - ) - column_count: int = proto.Field( - proto.INT64, - number=7, - ) - input_configs: MutableSequence[io.InputConfig] = proto.RepeatedField( - proto.MESSAGE, - number=5, - message=io.InputConfig, - ) - etag: str = proto.Field( - proto.STRING, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/tables.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/tables.py deleted file mode 100644 index 46649fb7..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/tables.py +++ /dev/null @@ -1,426 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import column_spec -from google.cloud.automl_v1beta1.types import data_stats -from google.cloud.automl_v1beta1.types import ranges -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'TablesDatasetMetadata', - 'TablesModelMetadata', - 'TablesAnnotation', - 'TablesModelColumnInfo', - }, -) - - -class TablesDatasetMetadata(proto.Message): - r"""Metadata for a dataset used for AutoML Tables. 
- - Attributes: - primary_table_spec_id (str): - Output only. The table_spec_id of the primary table of this - dataset. - target_column_spec_id (str): - column_spec_id of the primary table's column that should be - used as the training & prediction target. This column must - be non-nullable and have one of following data types - (otherwise model creation will error): - - - CATEGORY - - - FLOAT64 - - If the type is CATEGORY , only up to 100 unique values may - exist in that column across all rows. - - NOTE: Updates of this field will instantly affect any other - users concurrently working with the dataset. - weight_column_spec_id (str): - column_spec_id of the primary table's column that should be - used as the weight column, i.e. the higher the value the - more important the row will be during model training. - Required type: FLOAT64. Allowed values: 0 to 10000, - inclusive on both ends; 0 means the row is ignored for - training. If not set all rows are assumed to have equal - weight of 1. NOTE: Updates of this field will instantly - affect any other users concurrently working with the - dataset. - ml_use_column_spec_id (str): - column_spec_id of the primary table column which specifies a - possible ML use of the row, i.e. the column will be used to - split the rows into TRAIN, VALIDATE and TEST sets. Required - type: STRING. This column, if set, must either have all of - ``TRAIN``, ``VALIDATE``, ``TEST`` among its values, or only - have ``TEST``, ``UNASSIGNED`` values. In the latter case the - rows with ``UNASSIGNED`` value will be assigned by AutoML. - Note that if a given ml use distribution makes it impossible - to create a "good" model, that call will error describing - the issue. If both this column_spec_id and primary table's - time_column_spec_id are not set, then all rows are treated - as ``UNASSIGNED``. NOTE: Updates of this field will - instantly affect any other users concurrently working with - the dataset. - target_column_correlations (MutableMapping[str, google.cloud.automl_v1beta1.types.CorrelationStats]): - Output only. Correlations between - - [TablesDatasetMetadata.target_column_spec_id][google.cloud.automl.v1beta1.TablesDatasetMetadata.target_column_spec_id], - and other columns of the - - [TablesDatasetMetadataprimary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id]. - Only set if the target column is set. Mapping from other - column spec id to its CorrelationStats with the target - column. This field may be stale, see the stats_update_time - field for for the timestamp at which these stats were last - updated. - stats_update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The most recent timestamp when - target_column_correlations field and all descendant - ColumnSpec.data_stats and ColumnSpec.top_correlated_columns - fields were last (re-)generated. Any changes that happened - to the dataset afterwards are not reflected in these fields - values. The regeneration happens in the background on a best - effort basis. 
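As an illustration of how the ``target_column_spec_id`` described above is typically set on an existing Tables dataset (a sketch only, not part of this patch; the dataset name, column spec ID, and the update-mask path are assumptions).

# Illustrative sketch only; the dataset name, column spec ID and the
# update-mask path below are assumptions.
from google.cloud import automl_v1beta1
from google.protobuf import field_mask_pb2

def set_target_column(dataset_name: str, target_column_spec_id: str) -> None:
    client = automl_v1beta1.AutoMlClient()
    dataset = automl_v1beta1.Dataset(
        name=dataset_name,
        tables_dataset_metadata=automl_v1beta1.TablesDatasetMetadata(
            target_column_spec_id=target_column_spec_id,
        ),
    )
    updated = client.update_dataset(
        request=automl_v1beta1.UpdateDatasetRequest(
            dataset=dataset,
            update_mask=field_mask_pb2.FieldMask(
                paths=["tables_dataset_metadata.target_column_spec_id"],
            ),
        )
    )
    print("Target column set on:", updated.name)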
- """ - - primary_table_spec_id: str = proto.Field( - proto.STRING, - number=1, - ) - target_column_spec_id: str = proto.Field( - proto.STRING, - number=2, - ) - weight_column_spec_id: str = proto.Field( - proto.STRING, - number=3, - ) - ml_use_column_spec_id: str = proto.Field( - proto.STRING, - number=4, - ) - target_column_correlations: MutableMapping[str, data_stats.CorrelationStats] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=6, - message=data_stats.CorrelationStats, - ) - stats_update_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - - -class TablesModelMetadata(proto.Message): - r"""Model metadata specific to AutoML Tables. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - optimization_objective_recall_value (float): - Required when optimization_objective is - "MAXIMIZE_PRECISION_AT_RECALL". Must be between 0 and 1, - inclusive. - - This field is a member of `oneof`_ ``additional_optimization_objective_config``. - optimization_objective_precision_value (float): - Required when optimization_objective is - "MAXIMIZE_RECALL_AT_PRECISION". Must be between 0 and 1, - inclusive. - - This field is a member of `oneof`_ ``additional_optimization_objective_config``. - target_column_spec (google.cloud.automl_v1beta1.types.ColumnSpec): - Column spec of the dataset's primary table's column the - model is predicting. Snapshotted when model creation - started. Only 3 fields are used: name - May be set on - CreateModel, if it's not then the ColumnSpec corresponding - to the current target_column_spec_id of the dataset the - model is trained from is used. If neither is set, - CreateModel will error. display_name - Output only. - data_type - Output only. - input_feature_column_specs (MutableSequence[google.cloud.automl_v1beta1.types.ColumnSpec]): - Column specs of the dataset's primary table's columns, on - which the model is trained and which are used as the input - for predictions. The - - [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - as well as, according to dataset's state upon model - creation, - - [weight_column][google.cloud.automl.v1beta1.TablesDatasetMetadata.weight_column_spec_id], - and - - [ml_use_column][google.cloud.automl.v1beta1.TablesDatasetMetadata.ml_use_column_spec_id] - must never be included here. - - Only 3 fields are used: - - - name - May be set on CreateModel, if set only the columns - specified are used, otherwise all primary table's columns - (except the ones listed above) are used for the training - and prediction input. - - - display_name - Output only. - - - data_type - Output only. - optimization_objective (str): - Objective function the model is optimizing towards. The - training process creates a model that maximizes/minimizes - the value of the objective function over the validation set. - - The supported optimization objectives depend on the - prediction type. If the field is not set, a default - objective function is used. - - CLASSIFICATION_BINARY: "MAXIMIZE_AU_ROC" (default) - - Maximize the area under the receiver operating - characteristic (ROC) curve. "MINIMIZE_LOG_LOSS" - Minimize - log loss. 
"MAXIMIZE_AU_PRC" - Maximize the area under the - precision-recall curve. "MAXIMIZE_PRECISION_AT_RECALL" - - Maximize precision for a specified recall value. - "MAXIMIZE_RECALL_AT_PRECISION" - Maximize recall for a - specified precision value. - - CLASSIFICATION_MULTI_CLASS : "MINIMIZE_LOG_LOSS" (default) - - Minimize log loss. - - REGRESSION: "MINIMIZE_RMSE" (default) - Minimize - root-mean-squared error (RMSE). "MINIMIZE_MAE" - Minimize - mean-absolute error (MAE). "MINIMIZE_RMSLE" - Minimize - root-mean-squared log error (RMSLE). - tables_model_column_info (MutableSequence[google.cloud.automl_v1beta1.types.TablesModelColumnInfo]): - Output only. Auxiliary information for each of the - input_feature_column_specs with respect to this particular - model. - train_budget_milli_node_hours (int): - Required. The train budget of creating this - model, expressed in milli node hours i.e. 1,000 - value in this field means 1 node hour. - - The training cost of the model will not exceed - this budget. The final cost will be attempted to - be close to the budget, though may end up being - (even) noticeably smaller - at the backend's - discretion. This especially may happen when - further model training ceases to provide any - improvements. - - If the budget is set to a value known to be - insufficient to train a model for the given - dataset, the training won't be attempted and - will error. - - The train budget must be between 1,000 and - 72,000 milli node hours, inclusive. - train_cost_milli_node_hours (int): - Output only. The actual training cost of the - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed the train budget. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. By default, the - early stopping feature is enabled, which means - that AutoML Tables might stop training before - the entire training budget has been used. - """ - - optimization_objective_recall_value: float = proto.Field( - proto.FLOAT, - number=17, - oneof='additional_optimization_objective_config', - ) - optimization_objective_precision_value: float = proto.Field( - proto.FLOAT, - number=18, - oneof='additional_optimization_objective_config', - ) - target_column_spec: column_spec.ColumnSpec = proto.Field( - proto.MESSAGE, - number=2, - message=column_spec.ColumnSpec, - ) - input_feature_column_specs: MutableSequence[column_spec.ColumnSpec] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=column_spec.ColumnSpec, - ) - optimization_objective: str = proto.Field( - proto.STRING, - number=4, - ) - tables_model_column_info: MutableSequence['TablesModelColumnInfo'] = proto.RepeatedField( - proto.MESSAGE, - number=5, - message='TablesModelColumnInfo', - ) - train_budget_milli_node_hours: int = proto.Field( - proto.INT64, - number=6, - ) - train_cost_milli_node_hours: int = proto.Field( - proto.INT64, - number=7, - ) - disable_early_stopping: bool = proto.Field( - proto.BOOL, - number=12, - ) - - -class TablesAnnotation(proto.Message): - r"""Contains annotation details specific to Tables. - - Attributes: - score (float): - Output only. A confidence estimate between 0.0 and 1.0, - inclusive. A higher value means greater confidence in the - returned value. For - - [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - of FLOAT64 data type the score is not populated. - prediction_interval (google.cloud.automl_v1beta1.types.DoubleRange): - Output only. 
Only populated when - - [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] - has FLOAT64 data type. An interval in which the exactly - correct target value has 95% chance to be in. - value (google.protobuf.struct_pb2.Value): - The predicted value of the row's - - [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]. - The value depends on the column's DataType: - - - CATEGORY - the predicted (with the above confidence - ``score``) CATEGORY value. - - - FLOAT64 - the predicted (with above - ``prediction_interval``) FLOAT64 value. - tables_model_column_info (MutableSequence[google.cloud.automl_v1beta1.types.TablesModelColumnInfo]): - Output only. Auxiliary information for each of the model's - - [input_feature_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] - with respect to this particular prediction. If no other - fields than - - [column_spec_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_spec_name] - and - - [column_display_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_display_name] - would be populated, then this whole field is not. - baseline_score (float): - Output only. Stores the prediction score for - the baseline example, which is defined as the - example with all values set to their baseline - values. This is used as part of the Sampled - Shapley explanation of the model's prediction. - This field is populated only when feature - importance is requested. For regression models, - this holds the baseline prediction for the - baseline example. For classification models, - this holds the baseline prediction for the - baseline example for the argmax class. - """ - - score: float = proto.Field( - proto.FLOAT, - number=1, - ) - prediction_interval: ranges.DoubleRange = proto.Field( - proto.MESSAGE, - number=4, - message=ranges.DoubleRange, - ) - value: struct_pb2.Value = proto.Field( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - tables_model_column_info: MutableSequence['TablesModelColumnInfo'] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='TablesModelColumnInfo', - ) - baseline_score: float = proto.Field( - proto.FLOAT, - number=5, - ) - - -class TablesModelColumnInfo(proto.Message): - r"""An information specific to given column and Tables Model, in - context of the Model and the predictions created by it. - - Attributes: - column_spec_name (str): - Output only. The name of the ColumnSpec - describing the column. Not populated when this - proto is outputted to BigQuery. - column_display_name (str): - Output only. The display name of the column (same as the - display_name of its ColumnSpec). - feature_importance (float): - Output only. When given as part of a Model (always - populated): Measurement of how much model predictions - correctness on the TEST data depend on values in this - column. A value between 0 and 1, higher means higher - influence. These values are normalized - for all input - feature columns of a given model they add to 1. - - When given back by Predict (populated iff - [feature_importance - param][google.cloud.automl.v1beta1.PredictRequest.params] is - set) or Batch Predict (populated iff - [feature_importance][google.cloud.automl.v1beta1.PredictRequest.params] - param is set): Measurement of how impactful for the - prediction returned for the given row the value in this - column was. 
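For illustration, the annotation fields above could be read from a Tables prediction like this; the response object and the feature-importance request parameter value are assumptions, not part of the generated types:

from google.cloud import automl_v1beta1

def show_tables_prediction(response: automl_v1beta1.PredictResponse) -> None:
    """Print the Tables annotation fields described above for each payload."""
    # The request is assumed to have included params={"feature_importance": "true"};
    # the exact param value is an assumption (see PredictRequest.params).
    for payload in response.payload:
        annotation = payload.tables
        print("score:", annotation.score)              # not populated for FLOAT64 targets
        print("predicted value:", annotation.value)    # google.protobuf.Value
        interval = annotation.prediction_interval      # set only for FLOAT64 targets
        print("95% interval:", interval.start, interval.end)
        for column_info in annotation.tables_model_column_info:
            print(column_info.column_display_name, column_info.feature_importance)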
Specifically, the feature importance specifies - the marginal contribution that the feature made to the - prediction score compared to the baseline score. These - values are computed using the Sampled Shapley method. - """ - - column_spec_name: str = proto.Field( - proto.STRING, - number=1, - ) - column_display_name: str = proto.Field( - proto.STRING, - number=2, - ) - feature_importance: float = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/temporal.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/temporal.py deleted file mode 100644 index 7bb71eb5..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/temporal.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'TimeSegment', - }, -) - - -class TimeSegment(proto.Message): - r"""A time period inside of an example that has a time dimension - (e.g. video). - - Attributes: - start_time_offset (google.protobuf.duration_pb2.Duration): - Start of the time segment (inclusive), - represented as the duration since the example - start. - end_time_offset (google.protobuf.duration_pb2.Duration): - End of the time segment (exclusive), - represented as the duration since the example - start. - """ - - start_time_offset: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - end_time_offset: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=2, - message=duration_pb2.Duration, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text.py deleted file mode 100644 index 9a59bf8d..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text.py +++ /dev/null @@ -1,119 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
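A small, self-contained sketch of building the TimeSegment message defined above; the offsets are arbitrary example values:

from google.protobuf import duration_pb2

from google.cloud import automl_v1beta1

# Arbitrary example offsets: a segment covering seconds 5 through 15 of a video example.
segment = automl_v1beta1.TimeSegment(
    start_time_offset=duration_pb2.Duration(seconds=5),
    end_time_offset=duration_pb2.Duration(seconds=15),
)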
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import classification - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'TextClassificationDatasetMetadata', - 'TextClassificationModelMetadata', - 'TextExtractionDatasetMetadata', - 'TextExtractionModelMetadata', - 'TextSentimentDatasetMetadata', - 'TextSentimentModelMetadata', - }, -) - - -class TextClassificationDatasetMetadata(proto.Message): - r"""Dataset metadata for classification. - - Attributes: - classification_type (google.cloud.automl_v1beta1.types.ClassificationType): - Required. Type of the classification problem. - """ - - classification_type: classification.ClassificationType = proto.Field( - proto.ENUM, - number=1, - enum=classification.ClassificationType, - ) - - -class TextClassificationModelMetadata(proto.Message): - r"""Model metadata that is specific to text classification. - - Attributes: - classification_type (google.cloud.automl_v1beta1.types.ClassificationType): - Output only. Classification type of the - dataset used to train this model. - """ - - classification_type: classification.ClassificationType = proto.Field( - proto.ENUM, - number=3, - enum=classification.ClassificationType, - ) - - -class TextExtractionDatasetMetadata(proto.Message): - r"""Dataset metadata that is specific to text extraction - """ - - -class TextExtractionModelMetadata(proto.Message): - r"""Model metadata that is specific to text extraction. - - Attributes: - model_hint (str): - Indicates the scope of model use case. - - - ``default``: Use to train a general text extraction - model. Default value. - - - ``health_care``: Use to train a text extraction model - that is tuned for healthcare applications. - """ - - model_hint: str = proto.Field( - proto.STRING, - number=3, - ) - - -class TextSentimentDatasetMetadata(proto.Message): - r"""Dataset metadata for text sentiment. - - Attributes: - sentiment_max (int): - Required. A sentiment is expressed as an integer ordinal, - where higher value means a more positive sentiment. The - range of sentiments that will be used is between 0 and - sentiment_max (inclusive on both ends), and all the values - in the range must be represented in the dataset before a - model can be created. sentiment_max value must be between 1 - and 10 (inclusive). - """ - - sentiment_max: int = proto.Field( - proto.INT32, - number=1, - ) - - -class TextSentimentModelMetadata(proto.Message): - r"""Model metadata that is specific to text sentiment. - """ - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_extraction.py deleted file mode 100644 index 5c10ff54..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_extraction.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
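Hedged sketches of attaching the text dataset metadata variants above to a Dataset; the display names, the classification type, and the sentiment_max value are example choices only:

from google.cloud import automl_v1beta1

classification_dataset = automl_v1beta1.Dataset(
    display_name="my_text_classification_dataset",   # placeholder name
    text_classification_dataset_metadata=automl_v1beta1.TextClassificationDatasetMetadata(
        classification_type=automl_v1beta1.ClassificationType.MULTICLASS,
    ),
)

sentiment_dataset = automl_v1beta1.Dataset(
    display_name="my_text_sentiment_dataset",         # placeholder name
    text_sentiment_dataset_metadata=automl_v1beta1.TextSentimentDatasetMetadata(
        sentiment_max=4,  # training rows must cover every ordinal from 0 to 4
    ),
)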
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import text_segment as gca_text_segment - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'TextExtractionAnnotation', - 'TextExtractionEvaluationMetrics', - }, -) - - -class TextExtractionAnnotation(proto.Message): - r"""Annotation for identifying spans of text. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - text_segment (google.cloud.automl_v1beta1.types.TextSegment): - An entity annotation will set this, which is - the part of the original text to which the - annotation pertains. - - This field is a member of `oneof`_ ``annotation``. - score (float): - Output only. A confidence estimate between - 0.0 and 1.0. A higher value means greater - confidence in correctness of the annotation. - """ - - text_segment: gca_text_segment.TextSegment = proto.Field( - proto.MESSAGE, - number=3, - oneof='annotation', - message=gca_text_segment.TextSegment, - ) - score: float = proto.Field( - proto.FLOAT, - number=1, - ) - - -class TextExtractionEvaluationMetrics(proto.Message): - r"""Model evaluation metrics for text extraction problems. - - Attributes: - au_prc (float): - Output only. The Area under precision recall - curve metric. - confidence_metrics_entries (MutableSequence[google.cloud.automl_v1beta1.types.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]): - Output only. Metrics that have confidence - thresholds. Precision-recall curve can be - derived from it. - """ - - class ConfidenceMetricsEntry(proto.Message): - r"""Metrics for a single confidence threshold. - - Attributes: - confidence_threshold (float): - Output only. The confidence threshold value - used to compute the metrics. Only annotations - with score of at least this threshold are - considered to be ones the model would return. - recall (float): - Output only. Recall under the given - confidence threshold. - precision (float): - Output only. Precision under the given - confidence threshold. - f1_score (float): - Output only. The harmonic mean of recall and - precision. - """ - - confidence_threshold: float = proto.Field( - proto.FLOAT, - number=1, - ) - recall: float = proto.Field( - proto.FLOAT, - number=3, - ) - precision: float = proto.Field( - proto.FLOAT, - number=4, - ) - f1_score: float = proto.Field( - proto.FLOAT, - number=5, - ) - - au_prc: float = proto.Field( - proto.FLOAT, - number=1, - ) - confidence_metrics_entries: MutableSequence[ConfidenceMetricsEntry] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=ConfidenceMetricsEntry, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_segment.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_segment.py deleted file mode 100644 index 86b9feb8..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_segment.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
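For illustration, assuming `response` is a PredictResponse from a text extraction model and `evaluation` is a ModelEvaluation with extraction metrics populated, the messages above might be consumed like this:

from google.cloud import automl_v1beta1

def show_text_extraction(response: automl_v1beta1.PredictResponse,
                         evaluation: automl_v1beta1.ModelEvaluation) -> None:
    """Print extraction annotations and the confidence-threshold metrics described above."""
    for payload in response.payload:
        extraction = payload.text_extraction
        segment = extraction.text_segment
        print(f"[{segment.start_offset}:{segment.end_offset}] "
              f"{segment.content!r} score={extraction.score:.2f}")

    metrics = evaluation.text_extraction_evaluation_metrics
    print("AU-PRC:", metrics.au_prc)
    for entry in metrics.confidence_metrics_entries:
        print(entry.confidence_threshold, entry.precision, entry.recall, entry.f1_score)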
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'TextSegment', - }, -) - - -class TextSegment(proto.Message): - r"""A contiguous part of a text (string), assuming it has an - UTF-8 NFC encoding. - - Attributes: - content (str): - Output only. The content of the TextSegment. - start_offset (int): - Required. Zero-based character index of the - first character of the text segment (counting - characters from the beginning of the text). - end_offset (int): - Required. Zero-based character index of the first character - past the end of the text segment (counting character from - the beginning of the text). The character at the end_offset - is NOT included in the text segment. - """ - - content: str = proto.Field( - proto.STRING, - number=3, - ) - start_offset: int = proto.Field( - proto.INT64, - number=1, - ) - end_offset: int = proto.Field( - proto.INT64, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_sentiment.py deleted file mode 100644 index 49ac3c89..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/text_sentiment.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import classification - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'TextSentimentAnnotation', - 'TextSentimentEvaluationMetrics', - }, -) - - -class TextSentimentAnnotation(proto.Message): - r"""Contains annotation details specific to text sentiment. - - Attributes: - sentiment (int): - Output only. The sentiment with the semantic, as given to - the - [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData] - when populating the dataset from which the model used for - the prediction had been trained. The sentiment values are - between 0 and - Dataset.text_sentiment_dataset_metadata.sentiment_max - (inclusive), with higher value meaning more positive - sentiment. They are completely relative, i.e. 0 means least - positive sentiment and sentiment_max means the most positive - from the sentiments present in the train data. Therefore - e.g. 
if train data had only negative sentiment, then - sentiment_max, would be still negative (although least - negative). The sentiment shouldn't be confused with "score" - or "magnitude" from the previous Natural Language Sentiment - Analysis API. - """ - - sentiment: int = proto.Field( - proto.INT32, - number=1, - ) - - -class TextSentimentEvaluationMetrics(proto.Message): - r"""Model evaluation metrics for text sentiment problems. - - Attributes: - precision (float): - Output only. Precision. - recall (float): - Output only. Recall. - f1_score (float): - Output only. The harmonic mean of recall and - precision. - mean_absolute_error (float): - Output only. Mean absolute error. Only set - for the overall model evaluation, not for - evaluation of a single annotation spec. - mean_squared_error (float): - Output only. Mean squared error. Only set for - the overall model evaluation, not for evaluation - of a single annotation spec. - linear_kappa (float): - Output only. Linear weighted kappa. Only set - for the overall model evaluation, not for - evaluation of a single annotation spec. - quadratic_kappa (float): - Output only. Quadratic weighted kappa. Only - set for the overall model evaluation, not for - evaluation of a single annotation spec. - confusion_matrix (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix): - Output only. Confusion matrix of the - evaluation. Only set for the overall model - evaluation, not for evaluation of a single - annotation spec. - annotation_spec_id (MutableSequence[str]): - Output only. The annotation spec ids used for - this evaluation. Deprecated . - """ - - precision: float = proto.Field( - proto.FLOAT, - number=1, - ) - recall: float = proto.Field( - proto.FLOAT, - number=2, - ) - f1_score: float = proto.Field( - proto.FLOAT, - number=3, - ) - mean_absolute_error: float = proto.Field( - proto.FLOAT, - number=4, - ) - mean_squared_error: float = proto.Field( - proto.FLOAT, - number=5, - ) - linear_kappa: float = proto.Field( - proto.FLOAT, - number=6, - ) - quadratic_kappa: float = proto.Field( - proto.FLOAT, - number=7, - ) - confusion_matrix: classification.ClassificationEvaluationMetrics.ConfusionMatrix = proto.Field( - proto.MESSAGE, - number=8, - message=classification.ClassificationEvaluationMetrics.ConfusionMatrix, - ) - annotation_spec_id: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=9, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/translation.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/translation.py deleted file mode 100644 index 3a0ed0a3..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/translation.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
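An illustrative read of the sentiment messages above, assuming `response` comes from a text sentiment model whose dataset used sentiment_max=4 and `evaluation` is a ModelEvaluation for that model:

from google.cloud import automl_v1beta1

def show_text_sentiment(response: automl_v1beta1.PredictResponse,
                        evaluation: automl_v1beta1.ModelEvaluation,
                        sentiment_max: int = 4) -> None:
    """Print sentiment ordinals and the overall evaluation metrics described above."""
    for payload in response.payload:
        ordinal = payload.text_sentiment.sentiment
        print(f"sentiment {ordinal} of {sentiment_max} (higher means more positive)")

    metrics = evaluation.text_sentiment_evaluation_metrics
    print("precision:", metrics.precision, "recall:", metrics.recall)
    print("linear kappa:", metrics.linear_kappa, "quadratic kappa:", metrics.quadratic_kappa)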
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.automl_v1beta1.types import data_items - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'TranslationDatasetMetadata', - 'TranslationEvaluationMetrics', - 'TranslationModelMetadata', - 'TranslationAnnotation', - }, -) - - -class TranslationDatasetMetadata(proto.Message): - r"""Dataset metadata that is specific to translation. - - Attributes: - source_language_code (str): - Required. The BCP-47 language code of the - source language. - target_language_code (str): - Required. The BCP-47 language code of the - target language. - """ - - source_language_code: str = proto.Field( - proto.STRING, - number=1, - ) - target_language_code: str = proto.Field( - proto.STRING, - number=2, - ) - - -class TranslationEvaluationMetrics(proto.Message): - r"""Evaluation metrics for the dataset. - - Attributes: - bleu_score (float): - Output only. BLEU score. - base_bleu_score (float): - Output only. BLEU score for base model. - """ - - bleu_score: float = proto.Field( - proto.DOUBLE, - number=1, - ) - base_bleu_score: float = proto.Field( - proto.DOUBLE, - number=2, - ) - - -class TranslationModelMetadata(proto.Message): - r"""Model metadata that is specific to translation. - - Attributes: - base_model (str): - The resource name of the model to use as a baseline to train - the custom model. If unset, we use the default base model - provided by Google Translate. Format: - ``projects/{project_id}/locations/{location_id}/models/{model_id}`` - source_language_code (str): - Output only. Inferred from the dataset. - The source languge (The BCP-47 language code) - that is used for training. - target_language_code (str): - Output only. The target languge (The BCP-47 - language code) that is used for training. - """ - - base_model: str = proto.Field( - proto.STRING, - number=1, - ) - source_language_code: str = proto.Field( - proto.STRING, - number=2, - ) - target_language_code: str = proto.Field( - proto.STRING, - number=3, - ) - - -class TranslationAnnotation(proto.Message): - r"""Annotation details specific to translation. - - Attributes: - translated_content (google.cloud.automl_v1beta1.types.TextSnippet): - Output only . The translated content. - """ - - translated_content: data_items.TextSnippet = proto.Field( - proto.MESSAGE, - number=1, - message=data_items.TextSnippet, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/video.py b/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/video.py deleted file mode 100644 index bbb43723..00000000 --- a/owl-bot-staging/v1beta1/google/cloud/automl_v1beta1/types/video.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
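A brief sketch tying the translation messages above together; the model below is a placeholder, and `response` is assumed to be a PredictResponse from a deployed translation model:

from google.cloud import automl_v1beta1

# Placeholder model; leaving base_model unset uses the default Google Translate base model.
model = automl_v1beta1.Model(
    display_name="my_translation_model",
    dataset_id="TRL1234567890",  # placeholder dataset ID
    translation_model_metadata=automl_v1beta1.TranslationModelMetadata(),
)

def show_translations(response: automl_v1beta1.PredictResponse) -> None:
    """Print translated content from the annotation described above."""
    for payload in response.payload:
        print(payload.translation.translated_content.content)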
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.automl.v1beta1', - manifest={ - 'VideoClassificationDatasetMetadata', - 'VideoObjectTrackingDatasetMetadata', - 'VideoClassificationModelMetadata', - 'VideoObjectTrackingModelMetadata', - }, -) - - -class VideoClassificationDatasetMetadata(proto.Message): - r"""Dataset metadata specific to video classification. - All Video Classification datasets are treated as multi label. - - """ - - -class VideoObjectTrackingDatasetMetadata(proto.Message): - r"""Dataset metadata specific to video object tracking. - """ - - -class VideoClassificationModelMetadata(proto.Message): - r"""Model metadata specific to video classification. - """ - - -class VideoObjectTrackingModelMetadata(proto.Message): - r"""Model metadata specific to video object tracking. - """ - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/mypy.ini b/owl-bot-staging/v1beta1/mypy.ini deleted file mode 100644 index 574c5aed..00000000 --- a/owl-bot-staging/v1beta1/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.7 -namespace_packages = True diff --git a/owl-bot-staging/v1beta1/noxfile.py b/owl-bot-staging/v1beta1/noxfile.py deleted file mode 100644 index 9b389cd8..00000000 --- a/owl-bot-staging/v1beta1/noxfile.py +++ /dev/null @@ -1,184 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -ALL_PYTHON = [ - "3.7", - "3.8", - "3.9", - "3.10", - "3.11", -] - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - -BLACK_VERSION = "black==22.3.0" -BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"] -DEFAULT_PYTHON_VERSION = "3.11" - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", - "blacken", - "lint", - "lint_setup_py", -] - -@nox.session(python=ALL_PYTHON) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/automl_v1beta1/', - '--cov=tests/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=ALL_PYTHON) -def mypy(session): - """Run the type checker.""" - session.install( - 'mypy', - 'types-requests', - 'types-protobuf' - ) - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx==7.0.1", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def lint(session): - """Run linters. - - Returns a failure if the linters find linting errors or sufficiently - serious code quality issues. - """ - session.install("flake8", BLACK_VERSION) - session.run( - "black", - "--check", - *BLACK_PATHS, - ) - session.run("flake8", "google", "tests", "samples") - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def blacken(session): - """Run black. Format code to uniform standard.""" - session.install(BLACK_VERSION) - session.run( - "black", - *BLACK_PATHS, - ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def lint_setup_py(session): - """Verify that setup.py is valid (including RST check).""" - session.install("docutils", "pygments") - session.run("python", "setup.py", "check", "--restructuredtext", "--strict") diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_async.py deleted file mode 100644 index dec9fb5f..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_async.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! 
-# -# Snippet for CreateDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_CreateDataset_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_create_dataset(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - dataset = automl_v1beta1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1beta1.CreateDatasetRequest( - parent="parent_value", - dataset=dataset, - ) - - # Make the request - response = await client.create_dataset(request=request) - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_CreateDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_sync.py deleted file mode 100644 index d15ed0ab..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_dataset_sync.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_CreateDataset_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_create_dataset(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - dataset = automl_v1beta1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1beta1.CreateDatasetRequest( - parent="parent_value", - dataset=dataset, - ) - - # Make the request - response = client.create_dataset(request=request) - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_CreateDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_async.py deleted file mode 100644 index 3e252a04..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_CreateModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_create_model(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.CreateModelRequest( - parent="parent_value", - ) - - # Make the request - operation = client.create_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_CreateModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_sync.py deleted file mode 100644 index b4d792e7..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_create_model_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_CreateModel_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_create_model(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.CreateModelRequest( - parent="parent_value", - ) - - # Make the request - operation = client.create_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_CreateModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_async.py deleted file mode 100644 index 7b7217f7..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_DeleteDataset_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_delete_dataset(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.DeleteDatasetRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_dataset(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_DeleteDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_sync.py deleted file mode 100644 index 67357242..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_dataset_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_DeleteDataset_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_delete_dataset(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.DeleteDatasetRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_dataset(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_DeleteDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_async.py deleted file mode 100644 index 4af55e71..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_DeleteModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_delete_model(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.DeleteModelRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_DeleteModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_sync.py deleted file mode 100644 index 68e36405..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_delete_model_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_DeleteModel_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_delete_model(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.DeleteModelRequest( - name="name_value", - ) - - # Make the request - operation = client.delete_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_DeleteModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_async.py deleted file mode 100644 index 90fb4554..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeployModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_DeployModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_deploy_model():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.DeployModelRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.deploy_model(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_DeployModel_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_sync.py
deleted file mode 100644
index e9d2baa8..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_deploy_model_sync.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for DeployModel
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_DeployModel_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_deploy_model():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.DeployModelRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.deploy_model(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_DeployModel_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_async.py
deleted file mode 100644
index 5ea48ce9..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_async.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ExportData
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ExportData_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_export_data():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ExportDataRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.export_data(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ExportData_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_sync.py
deleted file mode 100644
index 45a22a7d..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_data_sync.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ExportData
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ExportData_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_export_data():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ExportDataRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.export_data(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ExportData_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py
deleted file mode 100644
index 64a7f787..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ExportEvaluatedExamples
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_export_evaluated_examples():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ExportEvaluatedExamplesRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.export_evaluated_examples(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py
deleted file mode 100644
index dbb68da9..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ExportEvaluatedExamples
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_export_evaluated_examples():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ExportEvaluatedExamplesRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.export_evaluated_examples(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_async.py
deleted file mode 100644
index 921c44d2..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_async.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ExportModel
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ExportModel_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_export_model():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ExportModelRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.export_model(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ExportModel_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_sync.py
deleted file mode 100644
index a20bb922..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_export_model_sync.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ExportModel
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ExportModel_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_export_model():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ExportModelRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.export_model(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ExportModel_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py
deleted file mode 100644
index 2d14135a..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetAnnotationSpec
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetAnnotationSpec_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_get_annotation_spec():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetAnnotationSpecRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = await client.get_annotation_spec(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetAnnotationSpec_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py
deleted file mode 100644
index ba2b38b7..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetAnnotationSpec
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetAnnotationSpec_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_get_annotation_spec():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetAnnotationSpecRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = client.get_annotation_spec(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetAnnotationSpec_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_async.py
deleted file mode 100644
index 03fbf1cc..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_async.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetColumnSpec
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetColumnSpec_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_get_column_spec():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetColumnSpecRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = await client.get_column_spec(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetColumnSpec_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_sync.py
deleted file mode 100644
index 78d344d1..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_column_spec_sync.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetColumnSpec
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetColumnSpec_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_get_column_spec():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetColumnSpecRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = client.get_column_spec(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetColumnSpec_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_async.py
deleted file mode 100644
index 279656f8..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_async.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetDataset
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetDataset_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_get_dataset():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetDatasetRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = await client.get_dataset(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetDataset_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_sync.py
deleted file mode 100644
index dc179f40..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_dataset_sync.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetDataset
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetDataset_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_get_dataset():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetDatasetRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = client.get_dataset(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetDataset_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_async.py
deleted file mode 100644
index 9c863417..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_async.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetModel
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetModel_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_get_model():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetModelRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = await client.get_model(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetModel_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py
deleted file mode 100644
index 2c04957b..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetModelEvaluation
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetModelEvaluation_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_get_model_evaluation():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetModelEvaluationRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = await client.get_model_evaluation(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetModelEvaluation_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py
deleted file mode 100644
index bcbc2444..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetModelEvaluation
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetModelEvaluation_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_get_model_evaluation():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetModelEvaluationRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = client.get_model_evaluation(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetModelEvaluation_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_sync.py
deleted file mode 100644
index 69cad83d..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_model_sync.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetModel
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetModel_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_get_model():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetModelRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = client.get_model(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetModel_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_async.py
deleted file mode 100644
index 41251c76..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_async.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetTableSpec
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetTableSpec_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_get_table_spec():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetTableSpecRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = await client.get_table_spec(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetTableSpec_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_sync.py
deleted file mode 100644
index 954dad4d..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_get_table_spec_sync.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for GetTableSpec
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_GetTableSpec_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_get_table_spec():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.GetTableSpecRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = client.get_table_spec(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_GetTableSpec_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_async.py
deleted file mode 100644
index 79eb9a13..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_async.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ImportData
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ImportData_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_import_data():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ImportDataRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.import_data(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ImportData_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_sync.py
deleted file mode 100644
index f52edc81..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_import_data_sync.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ImportData
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ImportData_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_import_data():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ImportDataRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.import_data(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ImportData_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_async.py
deleted file mode 100644
index 58a7edce..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_async.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ListColumnSpecs
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ListColumnSpecs_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_list_column_specs():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ListColumnSpecsRequest(
-        parent="parent_value",
-    )
-
-    # Make the request
-    page_result = client.list_column_specs(request=request)
-
-    # Handle the response
-    async for response in page_result:
-        print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ListColumnSpecs_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_sync.py
deleted file mode 100644
index 19b9e9b9..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_column_specs_sync.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ListColumnSpecs
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ListColumnSpecs_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-def sample_list_column_specs():
-    # Create a client
-    client = automl_v1beta1.AutoMlClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ListColumnSpecsRequest(
-        parent="parent_value",
-    )
-
-    # Make the request
-    page_result = client.list_column_specs(request=request)
-
-    # Handle the response
-    for response in page_result:
-        print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ListColumnSpecs_sync]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_async.py
deleted file mode 100644
index ceacfb9d..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_async.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ListDatasets
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ListDatasets_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import automl_v1beta1
-
-
-async def sample_list_datasets():
-    # Create a client
-    client = automl_v1beta1.AutoMlAsyncClient()
-
-    # Initialize request argument(s)
-    request = automl_v1beta1.ListDatasetsRequest(
-        parent="parent_value",
-    )
-
-    # Make the request
-    page_result = client.list_datasets(request=request)
-
-    # Handle the response
-    async for response in page_result:
-        print(response)
-
-# [END automl_v1beta1_generated_AutoMl_ListDatasets_async]
diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_sync.py
deleted file mode 100644
index 4cbdd955..00000000
--- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_datasets_sync.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ListDatasets
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-automl
-
-
-# [START automl_v1beta1_generated_AutoMl_ListDatasets_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_list_datasets(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListDatasetsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_datasets(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END automl_v1beta1_generated_AutoMl_ListDatasets_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py deleted file mode 100644 index 8a7f4acb..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListModelEvaluations -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_ListModelEvaluations_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_list_model_evaluations(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListModelEvaluationsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_model_evaluations(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END automl_v1beta1_generated_AutoMl_ListModelEvaluations_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py deleted file mode 100644 index 63bda4b3..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListModelEvaluations -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_ListModelEvaluations_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_list_model_evaluations(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListModelEvaluationsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_model_evaluations(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END automl_v1beta1_generated_AutoMl_ListModelEvaluations_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_async.py deleted file mode 100644 index 6db13d63..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListModels -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_ListModels_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_list_models(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListModelsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_models(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END automl_v1beta1_generated_AutoMl_ListModels_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_sync.py deleted file mode 100644 index 16dad314..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_models_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListModels -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_ListModels_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_list_models(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListModelsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_models(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END automl_v1beta1_generated_AutoMl_ListModels_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_async.py deleted file mode 100644 index 56e95afc..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListTableSpecs -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_ListTableSpecs_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_list_table_specs(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListTableSpecsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_table_specs(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END automl_v1beta1_generated_AutoMl_ListTableSpecs_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_sync.py deleted file mode 100644 index 22849c8f..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_list_table_specs_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListTableSpecs -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_ListTableSpecs_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_list_table_specs(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.ListTableSpecsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_table_specs(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END automl_v1beta1_generated_AutoMl_ListTableSpecs_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_async.py deleted file mode 100644 index be11ea61..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UndeployModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_UndeployModel_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_undeploy_model(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.UndeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.undeploy_model(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_UndeployModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_sync.py deleted file mode 100644 index 6b249d5c..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_undeploy_model_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UndeployModel -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_UndeployModel_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_undeploy_model(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.UndeployModelRequest( - name="name_value", - ) - - # Make the request - operation = client.undeploy_model(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_UndeployModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_async.py deleted file mode 100644 index d0b8afcf..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_async.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateColumnSpec -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_UpdateColumnSpec_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_update_column_spec(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.UpdateColumnSpecRequest( - ) - - # Make the request - response = await client.update_column_spec(request=request) - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_UpdateColumnSpec_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_sync.py deleted file mode 100644 index dc2b8e01..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_column_spec_sync.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateColumnSpec -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_UpdateColumnSpec_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_update_column_spec(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.UpdateColumnSpecRequest( - ) - - # Make the request - response = client.update_column_spec(request=request) - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_UpdateColumnSpec_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_async.py deleted file mode 100644 index 34c842e5..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_UpdateDataset_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_update_dataset(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - dataset = automl_v1beta1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1beta1.UpdateDatasetRequest( - dataset=dataset, - ) - - # Make the request - response = await client.update_dataset(request=request) - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_UpdateDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_sync.py deleted file mode 100644 index 90e1a4e3..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_dataset_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateDataset -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_UpdateDataset_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_update_dataset(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - dataset = automl_v1beta1.Dataset() - dataset.translation_dataset_metadata.source_language_code = "source_language_code_value" - dataset.translation_dataset_metadata.target_language_code = "target_language_code_value" - - request = automl_v1beta1.UpdateDatasetRequest( - dataset=dataset, - ) - - # Make the request - response = client.update_dataset(request=request) - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_UpdateDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_async.py deleted file mode 100644 index a4b47b6c..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_async.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateTableSpec -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_UpdateTableSpec_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_update_table_spec(): - # Create a client - client = automl_v1beta1.AutoMlAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.UpdateTableSpecRequest( - ) - - # Make the request - response = await client.update_table_spec(request=request) - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_UpdateTableSpec_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_sync.py deleted file mode 100644 index 7fb68361..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_auto_ml_update_table_spec_sync.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateTableSpec -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_AutoMl_UpdateTableSpec_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_update_table_spec(): - # Create a client - client = automl_v1beta1.AutoMlClient() - - # Initialize request argument(s) - request = automl_v1beta1.UpdateTableSpecRequest( - ) - - # Make the request - response = client.update_table_spec(request=request) - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_AutoMl_UpdateTableSpec_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_async.py deleted file mode 100644 index 8526a988..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for BatchPredict -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_PredictionService_BatchPredict_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_batch_predict(): - # Create a client - client = automl_v1beta1.PredictionServiceAsyncClient() - - # Initialize request argument(s) - request = automl_v1beta1.BatchPredictRequest( - name="name_value", - ) - - # Make the request - operation = client.batch_predict(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_PredictionService_BatchPredict_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_sync.py deleted file mode 100644 index 2011d22c..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_batch_predict_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for BatchPredict -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_PredictionService_BatchPredict_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_batch_predict(): - # Create a client - client = automl_v1beta1.PredictionServiceClient() - - # Initialize request argument(s) - request = automl_v1beta1.BatchPredictRequest( - name="name_value", - ) - - # Make the request - operation = client.batch_predict(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_PredictionService_BatchPredict_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_async.py deleted file mode 100644 index e72ceed8..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_async.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for Predict -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_PredictionService_Predict_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -async def sample_predict(): - # Create a client - client = automl_v1beta1.PredictionServiceAsyncClient() - - # Initialize request argument(s) - payload = automl_v1beta1.ExamplePayload() - payload.image.image_bytes = b'image_bytes_blob' - - request = automl_v1beta1.PredictRequest( - name="name_value", - payload=payload, - ) - - # Make the request - response = await client.predict(request=request) - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_PredictionService_Predict_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_sync.py deleted file mode 100644 index 903c6344..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/automl_v1beta1_generated_prediction_service_predict_sync.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for Predict -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-automl - - -# [START automl_v1beta1_generated_PredictionService_Predict_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import automl_v1beta1 - - -def sample_predict(): - # Create a client - client = automl_v1beta1.PredictionServiceClient() - - # Initialize request argument(s) - payload = automl_v1beta1.ExamplePayload() - payload.image.image_bytes = b'image_bytes_blob' - - request = automl_v1beta1.PredictRequest( - name="name_value", - payload=payload, - ) - - # Make the request - response = client.predict(request=request) - - # Handle the response - print(response) - -# [END automl_v1beta1_generated_PredictionService_Predict_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json b/owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json deleted file mode 100644 index 70c04874..00000000 --- a/owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json +++ /dev/null @@ -1,4289 +0,0 @@ -{ - "clientLibrary": { - "apis": [ - { - "id": "google.cloud.automl.v1beta1", - "version": "v1beta1" - } - ], - "language": "PYTHON", - "name": "google-cloud-automl", - "version": "0.1.0" - }, - "snippets": [ - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.create_dataset", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.CreateDataset", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "CreateDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.CreateDatasetRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "dataset", - "type": "google.cloud.automl_v1beta1.types.Dataset" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.Dataset", - "shortName": "create_dataset" - }, - "description": "Sample for CreateDataset", - "file": "automl_v1beta1_generated_auto_ml_create_dataset_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_CreateDataset_async", - "segments": [ - { - "end": 56, - "start": 27, - "type": "FULL" - }, - { - "end": 56, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 53, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 57, - "start": 54, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_create_dataset_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.create_dataset", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.CreateDataset", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "CreateDataset" - }, - "parameters": [ - { - "name": "request", - "type": 
"google.cloud.automl_v1beta1.types.CreateDatasetRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "dataset", - "type": "google.cloud.automl_v1beta1.types.Dataset" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.Dataset", - "shortName": "create_dataset" - }, - "description": "Sample for CreateDataset", - "file": "automl_v1beta1_generated_auto_ml_create_dataset_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_CreateDataset_sync", - "segments": [ - { - "end": 56, - "start": 27, - "type": "FULL" - }, - { - "end": 56, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 53, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 57, - "start": 54, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_create_dataset_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.create_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.CreateModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "CreateModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.CreateModelRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "model", - "type": "google.cloud.automl_v1beta1.types.Model" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_model" - }, - "description": "Sample for CreateModel", - "file": "automl_v1beta1_generated_auto_ml_create_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_CreateModel_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_create_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.create_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.CreateModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "CreateModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.CreateModelRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "model", - "type": 
"google.cloud.automl_v1beta1.types.Model" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_model" - }, - "description": "Sample for CreateModel", - "file": "automl_v1beta1_generated_auto_ml_create_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_CreateModel_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_create_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.delete_dataset", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.DeleteDataset", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeleteDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.DeleteDatasetRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_dataset" - }, - "description": "Sample for DeleteDataset", - "file": "automl_v1beta1_generated_auto_ml_delete_dataset_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_DeleteDataset_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_delete_dataset_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.delete_dataset", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.DeleteDataset", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeleteDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.DeleteDatasetRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_dataset" - }, - 
"description": "Sample for DeleteDataset", - "file": "automl_v1beta1_generated_auto_ml_delete_dataset_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_DeleteDataset_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_delete_dataset_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.delete_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.DeleteModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeleteModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.DeleteModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_model" - }, - "description": "Sample for DeleteModel", - "file": "automl_v1beta1_generated_auto_ml_delete_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_DeleteModel_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_delete_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.delete_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.DeleteModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeleteModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.DeleteModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_model" - }, - "description": "Sample for DeleteModel", - "file": "automl_v1beta1_generated_auto_ml_delete_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_DeleteModel_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - 
}, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_delete_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.deploy_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.DeployModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeployModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.DeployModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "deploy_model" - }, - "description": "Sample for DeployModel", - "file": "automl_v1beta1_generated_auto_ml_deploy_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_DeployModel_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_deploy_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.deploy_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.DeployModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "DeployModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.DeployModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "deploy_model" - }, - "description": "Sample for DeployModel", - "file": "automl_v1beta1_generated_auto_ml_deploy_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_DeployModel_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_deploy_model_sync.py" - }, - { 
- "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.export_data", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportData", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ExportData" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ExportDataRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1beta1.types.OutputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "export_data" - }, - "description": "Sample for ExportData", - "file": "automl_v1beta1_generated_auto_ml_export_data_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ExportData_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_export_data_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.export_data", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportData", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ExportData" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ExportDataRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1beta1.types.OutputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "export_data" - }, - "description": "Sample for ExportData", - "file": "automl_v1beta1_generated_auto_ml_export_data_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ExportData_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_export_data_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": 
"google.cloud.automl_v1beta1.AutoMlAsyncClient.export_evaluated_examples", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ExportEvaluatedExamples" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "export_evaluated_examples" - }, - "description": "Sample for ExportEvaluatedExamples", - "file": "automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_export_evaluated_examples_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.export_evaluated_examples", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ExportEvaluatedExamples" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "export_evaluated_examples" - }, - "description": "Sample for ExportEvaluatedExamples", - "file": "automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ExportEvaluatedExamples_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_export_evaluated_examples_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": 
"google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.export_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ExportModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ExportModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1beta1.types.ModelExportOutputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "export_model" - }, - "description": "Sample for ExportModel", - "file": "automl_v1beta1_generated_auto_ml_export_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ExportModel_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_export_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.export_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ExportModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ExportModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ExportModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1beta1.types.ModelExportOutputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "export_model" - }, - "description": "Sample for ExportModel", - "file": "automl_v1beta1_generated_auto_ml_export_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ExportModel_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_export_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": 
"google.cloud.automl_v1beta1.AutoMlAsyncClient.get_annotation_spec", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetAnnotationSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.AnnotationSpec", - "shortName": "get_annotation_spec" - }, - "description": "Sample for GetAnnotationSpec", - "file": "automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetAnnotationSpec_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_annotation_spec_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_annotation_spec", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetAnnotationSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.AnnotationSpec", - "shortName": "get_annotation_spec" - }, - "description": "Sample for GetAnnotationSpec", - "file": "automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetAnnotationSpec_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_annotation_spec_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.get_column_spec", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetColumnSpec", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": 
"AutoMl" - }, - "shortName": "GetColumnSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetColumnSpecRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", - "shortName": "get_column_spec" - }, - "description": "Sample for GetColumnSpec", - "file": "automl_v1beta1_generated_auto_ml_get_column_spec_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetColumnSpec_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_column_spec_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_column_spec", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetColumnSpec", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetColumnSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetColumnSpecRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", - "shortName": "get_column_spec" - }, - "description": "Sample for GetColumnSpec", - "file": "automl_v1beta1_generated_auto_ml_get_column_spec_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetColumnSpec_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_column_spec_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.get_dataset", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetDataset", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetDatasetRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - 
}, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.Dataset", - "shortName": "get_dataset" - }, - "description": "Sample for GetDataset", - "file": "automl_v1beta1_generated_auto_ml_get_dataset_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetDataset_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_dataset_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_dataset", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetDataset", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetDatasetRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.Dataset", - "shortName": "get_dataset" - }, - "description": "Sample for GetDataset", - "file": "automl_v1beta1_generated_auto_ml_get_dataset_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetDataset_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_dataset_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.get_model_evaluation", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetModelEvaluation" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetModelEvaluationRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.ModelEvaluation", - "shortName": "get_model_evaluation" - }, - "description": "Sample for GetModelEvaluation", - "file": "automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py", - "language": "PYTHON", - 
"origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetModelEvaluation_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_model_evaluation_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_model_evaluation", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetModelEvaluation" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetModelEvaluationRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.ModelEvaluation", - "shortName": "get_model_evaluation" - }, - "description": "Sample for GetModelEvaluation", - "file": "automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetModelEvaluation_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_model_evaluation_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.get_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.Model", - "shortName": "get_model" - }, - "description": "Sample for GetModel", - "file": "automl_v1beta1_generated_auto_ml_get_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetModel_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { 
- "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.Model", - "shortName": "get_model" - }, - "description": "Sample for GetModel", - "file": "automl_v1beta1_generated_auto_ml_get_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetModel_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.get_table_spec", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetTableSpec", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetTableSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetTableSpecRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.TableSpec", - "shortName": "get_table_spec" - }, - "description": "Sample for GetTableSpec", - "file": "automl_v1beta1_generated_auto_ml_get_table_spec_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetTableSpec_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_table_spec_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": 
"google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.get_table_spec", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.GetTableSpec", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "GetTableSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.GetTableSpecRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.TableSpec", - "shortName": "get_table_spec" - }, - "description": "Sample for GetTableSpec", - "file": "automl_v1beta1_generated_auto_ml_get_table_spec_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_GetTableSpec_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_get_table_spec_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.import_data", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ImportData", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ImportData" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ImportDataRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "input_config", - "type": "google.cloud.automl_v1beta1.types.InputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "import_data" - }, - "description": "Sample for ImportData", - "file": "automl_v1beta1_generated_auto_ml_import_data_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ImportData_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_import_data_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.import_data", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ImportData", - "service": { - "fullName": 
"google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ImportData" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ImportDataRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "input_config", - "type": "google.cloud.automl_v1beta1.types.InputConfig" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "import_data" - }, - "description": "Sample for ImportData", - "file": "automl_v1beta1_generated_auto_ml_import_data_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ImportData_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_import_data_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.list_column_specs", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListColumnSpecs" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ListColumnSpecsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsAsyncPager", - "shortName": "list_column_specs" - }, - "description": "Sample for ListColumnSpecs", - "file": "automl_v1beta1_generated_auto_ml_list_column_specs_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ListColumnSpecs_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_list_column_specs_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.list_column_specs", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListColumnSpecs" - }, - "parameters": [ - { - "name": "request", - "type": 
"google.cloud.automl_v1beta1.types.ListColumnSpecsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsPager", - "shortName": "list_column_specs" - }, - "description": "Sample for ListColumnSpecs", - "file": "automl_v1beta1_generated_auto_ml_list_column_specs_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ListColumnSpecs_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_list_column_specs_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.list_datasets", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ListDatasets", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListDatasets" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ListDatasetsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsAsyncPager", - "shortName": "list_datasets" - }, - "description": "Sample for ListDatasets", - "file": "automl_v1beta1_generated_auto_ml_list_datasets_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ListDatasets_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_list_datasets_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.list_datasets", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ListDatasets", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListDatasets" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ListDatasetsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - 
"type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsPager", - "shortName": "list_datasets" - }, - "description": "Sample for ListDatasets", - "file": "automl_v1beta1_generated_auto_ml_list_datasets_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ListDatasets_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_list_datasets_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.list_model_evaluations", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListModelEvaluations" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager", - "shortName": "list_model_evaluations" - }, - "description": "Sample for ListModelEvaluations", - "file": "automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ListModelEvaluations_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_list_model_evaluations_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.list_model_evaluations", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListModelEvaluations" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsPager", - "shortName": 
"list_model_evaluations" - }, - "description": "Sample for ListModelEvaluations", - "file": "automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ListModelEvaluations_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_list_model_evaluations_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.list_models", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ListModels", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListModels" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ListModelsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsAsyncPager", - "shortName": "list_models" - }, - "description": "Sample for ListModels", - "file": "automl_v1beta1_generated_auto_ml_list_models_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ListModels_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_list_models_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.list_models", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ListModels", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListModels" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ListModelsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsPager", - "shortName": "list_models" - }, - "description": "Sample for ListModels", - "file": "automl_v1beta1_generated_auto_ml_list_models_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ListModels_sync", - "segments": [ 
- { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_list_models_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.list_table_specs", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ListTableSpecs", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListTableSpecs" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ListTableSpecsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsAsyncPager", - "shortName": "list_table_specs" - }, - "description": "Sample for ListTableSpecs", - "file": "automl_v1beta1_generated_auto_ml_list_table_specs_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ListTableSpecs_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_list_table_specs_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.list_table_specs", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.ListTableSpecs", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "ListTableSpecs" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.ListTableSpecsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsPager", - "shortName": "list_table_specs" - }, - "description": "Sample for ListTableSpecs", - "file": "automl_v1beta1_generated_auto_ml_list_table_specs_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_ListTableSpecs_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": 
"REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_list_table_specs_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.undeploy_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.UndeployModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UndeployModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.UndeployModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "undeploy_model" - }, - "description": "Sample for UndeployModel", - "file": "automl_v1beta1_generated_auto_ml_undeploy_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_UndeployModel_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_undeploy_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.undeploy_model", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.UndeployModel", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UndeployModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.UndeployModelRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "undeploy_model" - }, - "description": "Sample for UndeployModel", - "file": "automl_v1beta1_generated_auto_ml_undeploy_model_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_UndeployModel_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_undeploy_model_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - 
"fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.update_column_spec", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UpdateColumnSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest" - }, - { - "name": "column_spec", - "type": "google.cloud.automl_v1beta1.types.ColumnSpec" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", - "shortName": "update_column_spec" - }, - "description": "Sample for UpdateColumnSpec", - "file": "automl_v1beta1_generated_auto_ml_update_column_spec_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_UpdateColumnSpec_async", - "segments": [ - { - "end": 50, - "start": 27, - "type": "FULL" - }, - { - "end": 50, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 47, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 51, - "start": 48, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_update_column_spec_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.update_column_spec", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UpdateColumnSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest" - }, - { - "name": "column_spec", - "type": "google.cloud.automl_v1beta1.types.ColumnSpec" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.ColumnSpec", - "shortName": "update_column_spec" - }, - "description": "Sample for UpdateColumnSpec", - "file": "automl_v1beta1_generated_auto_ml_update_column_spec_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_UpdateColumnSpec_sync", - "segments": [ - { - "end": 50, - "start": 27, - "type": "FULL" - }, - { - "end": 50, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 47, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 51, - "start": 48, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_update_column_spec_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": 
"google.cloud.automl_v1beta1.AutoMlAsyncClient.update_dataset", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateDataset", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UpdateDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.UpdateDatasetRequest" - }, - { - "name": "dataset", - "type": "google.cloud.automl_v1beta1.types.Dataset" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.Dataset", - "shortName": "update_dataset" - }, - "description": "Sample for UpdateDataset", - "file": "automl_v1beta1_generated_auto_ml_update_dataset_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_UpdateDataset_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_update_dataset_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.update_dataset", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateDataset", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UpdateDataset" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.UpdateDatasetRequest" - }, - { - "name": "dataset", - "type": "google.cloud.automl_v1beta1.types.Dataset" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.Dataset", - "shortName": "update_dataset" - }, - "description": "Sample for UpdateDataset", - "file": "automl_v1beta1_generated_auto_ml_update_dataset_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_UpdateDataset_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_update_dataset_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient", - "shortName": "AutoMlAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlAsyncClient.update_table_spec", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - 
}, - "shortName": "UpdateTableSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.UpdateTableSpecRequest" - }, - { - "name": "table_spec", - "type": "google.cloud.automl_v1beta1.types.TableSpec" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.TableSpec", - "shortName": "update_table_spec" - }, - "description": "Sample for UpdateTableSpec", - "file": "automl_v1beta1_generated_auto_ml_update_table_spec_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_UpdateTableSpec_async", - "segments": [ - { - "end": 50, - "start": 27, - "type": "FULL" - }, - { - "end": 50, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 47, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 51, - "start": 48, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_update_table_spec_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.AutoMlClient", - "shortName": "AutoMlClient" - }, - "fullName": "google.cloud.automl_v1beta1.AutoMlClient.update_table_spec", - "method": { - "fullName": "google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec", - "service": { - "fullName": "google.cloud.automl.v1beta1.AutoMl", - "shortName": "AutoMl" - }, - "shortName": "UpdateTableSpec" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.UpdateTableSpecRequest" - }, - { - "name": "table_spec", - "type": "google.cloud.automl_v1beta1.types.TableSpec" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.TableSpec", - "shortName": "update_table_spec" - }, - "description": "Sample for UpdateTableSpec", - "file": "automl_v1beta1_generated_auto_ml_update_table_spec_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_AutoMl_UpdateTableSpec_sync", - "segments": [ - { - "end": 50, - "start": 27, - "type": "FULL" - }, - { - "end": 50, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 47, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 51, - "start": 48, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_auto_ml_update_table_spec_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.PredictionServiceAsyncClient.batch_predict", - "method": { - "fullName": "google.cloud.automl.v1beta1.PredictionService.BatchPredict", - "service": { - "fullName": "google.cloud.automl.v1beta1.PredictionService", - "shortName": "PredictionService" - }, - "shortName": "BatchPredict" - }, - "parameters": [ - { - "name": "request", - "type": 
"google.cloud.automl_v1beta1.types.BatchPredictRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "input_config", - "type": "google.cloud.automl_v1beta1.types.BatchPredictInputConfig" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1beta1.types.BatchPredictOutputConfig" - }, - { - "name": "params", - "type": "MutableMapping[str, str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "batch_predict" - }, - "description": "Sample for BatchPredict", - "file": "automl_v1beta1_generated_prediction_service_batch_predict_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_PredictionService_BatchPredict_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_prediction_service_batch_predict_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" - }, - "fullName": "google.cloud.automl_v1beta1.PredictionServiceClient.batch_predict", - "method": { - "fullName": "google.cloud.automl.v1beta1.PredictionService.BatchPredict", - "service": { - "fullName": "google.cloud.automl.v1beta1.PredictionService", - "shortName": "PredictionService" - }, - "shortName": "BatchPredict" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.BatchPredictRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "input_config", - "type": "google.cloud.automl_v1beta1.types.BatchPredictInputConfig" - }, - { - "name": "output_config", - "type": "google.cloud.automl_v1beta1.types.BatchPredictOutputConfig" - }, - { - "name": "params", - "type": "MutableMapping[str, str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "batch_predict" - }, - "description": "Sample for BatchPredict", - "file": "automl_v1beta1_generated_prediction_service_batch_predict_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_PredictionService_BatchPredict_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_prediction_service_batch_predict_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.automl_v1beta1.PredictionServiceAsyncClient", - "shortName": 
"PredictionServiceAsyncClient" - }, - "fullName": "google.cloud.automl_v1beta1.PredictionServiceAsyncClient.predict", - "method": { - "fullName": "google.cloud.automl.v1beta1.PredictionService.Predict", - "service": { - "fullName": "google.cloud.automl.v1beta1.PredictionService", - "shortName": "PredictionService" - }, - "shortName": "Predict" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.PredictRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "payload", - "type": "google.cloud.automl_v1beta1.types.ExamplePayload" - }, - { - "name": "params", - "type": "MutableMapping[str, str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.PredictResponse", - "shortName": "predict" - }, - "description": "Sample for Predict", - "file": "automl_v1beta1_generated_prediction_service_predict_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_PredictionService_Predict_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_prediction_service_predict_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.automl_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" - }, - "fullName": "google.cloud.automl_v1beta1.PredictionServiceClient.predict", - "method": { - "fullName": "google.cloud.automl.v1beta1.PredictionService.Predict", - "service": { - "fullName": "google.cloud.automl.v1beta1.PredictionService", - "shortName": "PredictionService" - }, - "shortName": "Predict" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.automl_v1beta1.types.PredictRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "payload", - "type": "google.cloud.automl_v1beta1.types.ExamplePayload" - }, - { - "name": "params", - "type": "MutableMapping[str, str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.cloud.automl_v1beta1.types.PredictResponse", - "shortName": "predict" - }, - "description": "Sample for Predict", - "file": "automl_v1beta1_generated_prediction_service_predict_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "automl_v1beta1_generated_PredictionService_Predict_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 49, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 50, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "automl_v1beta1_generated_prediction_service_predict_sync.py" - } - ] -} diff --git 
a/owl-bot-staging/v1beta1/scripts/fixup_automl_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_automl_v1beta1_keywords.py deleted file mode 100644 index 17376fc5..00000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_automl_v1beta1_keywords.py +++ /dev/null @@ -1,201 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class automlCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'batch_predict': ('name', 'input_config', 'output_config', 'params', ), - 'create_dataset': ('parent', 'dataset', ), - 'create_model': ('parent', 'model', ), - 'delete_dataset': ('name', ), - 'delete_model': ('name', ), - 'deploy_model': ('name', 'image_object_detection_model_deployment_metadata', 'image_classification_model_deployment_metadata', ), - 'export_data': ('name', 'output_config', ), - 'export_evaluated_examples': ('name', 'output_config', ), - 'export_model': ('name', 'output_config', ), - 'get_annotation_spec': ('name', ), - 'get_column_spec': ('name', 'field_mask', ), - 'get_dataset': ('name', ), - 'get_model': ('name', ), - 'get_model_evaluation': ('name', ), - 'get_table_spec': ('name', 'field_mask', ), - 'import_data': ('name', 'input_config', ), - 'list_column_specs': ('parent', 'field_mask', 'filter', 'page_size', 'page_token', ), - 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', ), - 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', ), - 'list_models': ('parent', 'filter', 'page_size', 'page_token', ), - 'list_table_specs': ('parent', 'field_mask', 'filter', 'page_size', 'page_token', ), - 'predict': ('name', 'payload', 'params', ), - 'undeploy_model': ('name', ), - 'update_column_spec': ('column_spec', 'update_mask', ), - 'update_dataset': ('dataset', 'update_mask', ), - 'update_table_spec': ('table_spec', 'update_mask', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. 
- args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=automlCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the automl client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/setup.py b/owl-bot-staging/v1beta1/setup.py deleted file mode 100644 index 95b4c8d1..00000000 --- a/owl-bot-staging/v1beta1/setup.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os - -import setuptools # type: ignore - -package_root = os.path.abspath(os.path.dirname(__file__)) - -name = 'google-cloud-automl' - - -description = "Google Cloud Automl API client library" - -version = {} -with open(os.path.join(package_root, 'google/cloud/automl/gapic_version.py')) as fp: - exec(fp.read(), version) -version = version["__version__"] - -if version[0] == "0": - release_status = "Development Status :: 4 - Beta" -else: - release_status = "Development Status :: 5 - Production/Stable" - -dependencies = [ - "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", - "proto-plus >= 1.22.0, <2.0.0dev", - "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", - "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", -] -url = "https://github.com/googleapis/python-automl" - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, "README.rst") -with io.open(readme_filename, encoding="utf-8") as readme_file: - readme = readme_file.read() - -packages = [ - package - for package in setuptools.PEP420PackageFinder.find() - if package.startswith("google") -] - -namespaces = ["google", "google.cloud"] - -setuptools.setup( - name=name, - version=version, - description=description, - long_description=readme, - author="Google LLC", - author_email="googleapis-packages@google.com", - license="Apache 2.0", - url=url, - classifiers=[ - release_status, - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - 
"Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Operating System :: OS Independent", - "Topic :: Internet", - ], - platforms="Posix; MacOS X; Windows", - packages=packages, - python_requires=">=3.7", - namespace_packages=namespaces, - install_requires=dependencies, - include_package_data=True, - zip_safe=False, -) diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.10.txt b/owl-bot-staging/v1beta1/testing/constraints-3.10.txt deleted file mode 100644 index ed7f9aed..00000000 --- a/owl-bot-staging/v1beta1/testing/constraints-3.10.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.11.txt b/owl-bot-staging/v1beta1/testing/constraints-3.11.txt deleted file mode 100644 index ed7f9aed..00000000 --- a/owl-bot-staging/v1beta1/testing/constraints-3.11.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.12.txt b/owl-bot-staging/v1beta1/testing/constraints-3.12.txt deleted file mode 100644 index ed7f9aed..00000000 --- a/owl-bot-staging/v1beta1/testing/constraints-3.12.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.7.txt b/owl-bot-staging/v1beta1/testing/constraints-3.7.txt deleted file mode 100644 index 6c44adfe..00000000 --- a/owl-bot-staging/v1beta1/testing/constraints-3.7.txt +++ /dev/null @@ -1,9 +0,0 @@ -# This constraints file is used to check that lower bounds -# are correct in setup.py -# List all library dependencies and extras in this file. -# Pin the version to the lower bound. -# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", -# Then this file should have google-cloud-foo==1.14.0 -google-api-core==1.34.0 -proto-plus==1.22.0 -protobuf==3.19.5 diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.8.txt b/owl-bot-staging/v1beta1/testing/constraints-3.8.txt deleted file mode 100644 index ed7f9aed..00000000 --- a/owl-bot-staging/v1beta1/testing/constraints-3.8.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/v1beta1/testing/constraints-3.9.txt b/owl-bot-staging/v1beta1/testing/constraints-3.9.txt deleted file mode 100644 index ed7f9aed..00000000 --- a/owl-bot-staging/v1beta1/testing/constraints-3.9.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. 
-google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/v1beta1/tests/__init__.py b/owl-bot-staging/v1beta1/tests/__init__.py deleted file mode 100644 index 1b4db446..00000000 --- a/owl-bot-staging/v1beta1/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/__init__.py b/owl-bot-staging/v1beta1/tests/unit/__init__.py deleted file mode 100644 index 1b4db446..00000000 --- a/owl-bot-staging/v1beta1/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py deleted file mode 100644 index 1b4db446..00000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/__init__.py deleted file mode 100644 index 1b4db446..00000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_auto_ml.py b/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_auto_ml.py deleted file mode 100644 index 8248424c..00000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_auto_ml.py +++ /dev/null @@ -1,14494 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock # pragma: NO COVER -except ImportError: # pragma: NO COVER - import mock - -import grpc -from grpc.experimental import aio -from collections.abc import Iterable -from google.protobuf import json_format -import json -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule -from proto.marshal.rules import wrappers -from requests import Response -from requests import Request, PreparedRequest -from requests.sessions import Session -from google.protobuf import json_format - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.automl_v1beta1.services.auto_ml import AutoMlAsyncClient -from google.cloud.automl_v1beta1.services.auto_ml import AutoMlClient -from google.cloud.automl_v1beta1.services.auto_ml import pagers -from google.cloud.automl_v1beta1.services.auto_ml import transports -from google.cloud.automl_v1beta1.types import annotation_spec -from google.cloud.automl_v1beta1.types import classification -from google.cloud.automl_v1beta1.types import column_spec -from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec -from google.cloud.automl_v1beta1.types import data_stats -from google.cloud.automl_v1beta1.types import data_types -from google.cloud.automl_v1beta1.types import dataset -from google.cloud.automl_v1beta1.types import dataset as gca_dataset -from google.cloud.automl_v1beta1.types import detection -from google.cloud.automl_v1beta1.types import image -from google.cloud.automl_v1beta1.types import io -from google.cloud.automl_v1beta1.types import model -from google.cloud.automl_v1beta1.types import model as gca_model -from google.cloud.automl_v1beta1.types import model_evaluation -from google.cloud.automl_v1beta1.types import operations -from google.cloud.automl_v1beta1.types import regression -from google.cloud.automl_v1beta1.types import service -from google.cloud.automl_v1beta1.types import table_spec -from google.cloud.automl_v1beta1.types import 
table_spec as gca_table_spec -from google.cloud.automl_v1beta1.types import tables -from google.cloud.automl_v1beta1.types import text -from google.cloud.automl_v1beta1.types import text_extraction -from google.cloud.automl_v1beta1.types import text_sentiment -from google.cloud.automl_v1beta1.types import translation -from google.cloud.automl_v1beta1.types import video -from google.longrunning import operations_pb2 # type: ignore -from google.oauth2 import service_account -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert AutoMlClient._get_default_mtls_endpoint(None) is None - assert AutoMlClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert AutoMlClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert AutoMlClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert AutoMlClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert AutoMlClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class,transport_name", [ - (AutoMlClient, "grpc"), - (AutoMlAsyncClient, "grpc_asyncio"), - (AutoMlClient, "rest"), -]) -def test_auto_ml_client_from_service_account_info(client_class, transport_name): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info, transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://automl.googleapis.com' - ) - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.AutoMlGrpcTransport, "grpc"), - (transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), - (transports.AutoMlRestTransport, "rest"), -]) -def test_auto_ml_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - 
-@pytest.mark.parametrize("client_class,transport_name", [ - (AutoMlClient, "grpc"), - (AutoMlAsyncClient, "grpc_asyncio"), - (AutoMlClient, "rest"), -]) -def test_auto_ml_client_from_service_account_file(client_class, transport_name): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://automl.googleapis.com' - ) - - -def test_auto_ml_client_get_transport_class(): - transport = AutoMlClient.get_transport_class() - available_transports = [ - transports.AutoMlGrpcTransport, - transports.AutoMlRestTransport, - ] - assert transport in available_transports - - transport = AutoMlClient.get_transport_class("grpc") - assert transport == transports.AutoMlGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), - (AutoMlClient, transports.AutoMlRestTransport, "rest"), -]) -@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) -@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) -def test_auto_ml_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(AutoMlClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(AutoMlClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class(transport=transport_name) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class(transport=transport_name) - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - # Check the case api_endpoint is provided - options = client_options.ClientOptions(api_audience="https://language.googleapis.com") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience="https://language.googleapis.com" - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", "true"), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", "false"), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", "false"), - (AutoMlClient, transports.AutoMlRestTransport, "rest", "true"), - (AutoMlClient, transports.AutoMlRestTransport, "rest", "false"), -]) -@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) 
-@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_auto_ml_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class", [ - AutoMlClient, AutoMlAsyncClient -]) -@mock.patch.object(AutoMlClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlClient)) -@mock.patch.object(AutoMlAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoMlAsyncClient)) -def test_auto_ml_client_get_mtls_endpoint_and_cert_source(client_class): - mock_client_cert_source = mock.Mock() - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source == mock_client_cert_source - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - mock_client_cert_source = mock.Mock() - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source == mock_client_cert_source - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc"), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio"), - (AutoMlClient, transports.AutoMlRestTransport, "rest"), -]) -def test_auto_ml_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", grpc_helpers), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), - (AutoMlClient, transports.AutoMlRestTransport, "rest", None), -]) -def test_auto_ml_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -def test_auto_ml_client_client_options_from_dict(): - with mock.patch('google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = AutoMlClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (AutoMlClient, transports.AutoMlGrpcTransport, "grpc", grpc_helpers), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), -]) -def test_auto_ml_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # test that the credentials from file are saved and used as the credentials. - with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel" - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - file_creds = ga_credentials.AnonymousCredentials() - load_creds.return_value = (file_creds, None) - adc.return_value = (creds, None) - client = client_class(client_options=options, transport=transport_name) - create_channel.assert_called_with( - "automl.googleapis.com:443", - credentials=file_creds, - credentials_file=None, - quota_project_id=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=None, - default_host="automl.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("request_type", [ - service.CreateDatasetRequest, - dict, -]) -def test_create_dataset(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - ) - response = client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -def test_create_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - client.create_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateDatasetRequest() - -@pytest.mark.asyncio -async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=service.CreateDatasetRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - )) - response = await client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_create_dataset_async_from_dict(): - await test_create_dataset_async(request_type=dict) - - -def test_create_dataset_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.CreateDatasetRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - call.return_value = gca_dataset.Dataset() - client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_dataset_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.CreateDatasetRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - await client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_create_dataset_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_dataset( - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].dataset - mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) - assert arg == mock_val - - -def test_create_dataset_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_dataset( - service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - -@pytest.mark.asyncio -async def test_create_dataset_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_dataset( - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].dataset - mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_create_dataset_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_dataset( - service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetDatasetRequest, - dict, -]) -def test_get_dataset(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - ) - response = client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -def test_get_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - client.get_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetDatasetRequest() - -@pytest.mark.asyncio -async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=service.GetDatasetRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - )) - response = await client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetDatasetRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_dataset_async_from_dict(): - await test_get_dataset_async(request_type=dict) - - -def test_get_dataset_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetDatasetRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - call.return_value = dataset.Dataset() - client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_dataset_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetDatasetRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) - await client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_dataset_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_dataset_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_dataset( - service.GetDatasetRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_dataset_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_dataset_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_dataset( - service.GetDatasetRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListDatasetsRequest, - dict, -]) -def test_list_datasets(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListDatasetsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListDatasetsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDatasetsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_datasets_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - client.list_datasets() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListDatasetsRequest() - -@pytest.mark.asyncio -async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=service.ListDatasetsRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListDatasetsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDatasetsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_datasets_async_from_dict(): - await test_list_datasets_async(request_type=dict) - - -def test_list_datasets_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListDatasetsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - call.return_value = service.ListDatasetsResponse() - client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_datasets_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListDatasetsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse()) - await client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_datasets_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListDatasetsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_datasets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_datasets_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_datasets( - service.ListDatasetsRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_datasets_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListDatasetsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListDatasetsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_datasets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_datasets_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_datasets( - service.ListDatasetsRequest(), - parent='parent_value', - ) - - -def test_list_datasets_pager(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_datasets(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in results) -def test_list_datasets_pages(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - pages = list(client.list_datasets(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_datasets_async_pager(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_datasets(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_datasets_async_pages(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_datasets(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize("request_type", [ - service.UpdateDatasetRequest, - dict, -]) -def test_update_dataset(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - ) - response = client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -def test_update_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - client.update_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateDatasetRequest() - -@pytest.mark.asyncio -async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=service.UpdateDatasetRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - )) - response = await client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_dataset_async_from_dict(): - await test_update_dataset_async(request_type=dict) - - -def test_update_dataset_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UpdateDatasetRequest() - - request.dataset.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - call.return_value = gca_dataset.Dataset() - client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'dataset.name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_dataset_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UpdateDatasetRequest() - - request.dataset.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - await client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'dataset.name=name_value', - ) in kw['metadata'] - - -def test_update_dataset_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_dataset( - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].dataset - mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) - assert arg == mock_val - - -def test_update_dataset_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_dataset( - service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - -@pytest.mark.asyncio -async def test_update_dataset_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_dataset( - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].dataset - mock_val = gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_update_dataset_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_dataset( - service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - -@pytest.mark.parametrize("request_type", [ - service.DeleteDatasetRequest, - dict, -]) -def test_delete_dataset(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - client.delete_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteDatasetRequest() - -@pytest.mark.asyncio -async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=service.DeleteDatasetRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_dataset_async_from_dict(): - await test_delete_dataset_async(request_type=dict) - - -def test_delete_dataset_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeleteDatasetRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_dataset_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeleteDatasetRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_delete_dataset_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_dataset_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_dataset( - service.DeleteDatasetRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_delete_dataset_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_delete_dataset_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_dataset( - service.DeleteDatasetRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ImportDataRequest, - dict, -]) -def test_import_data(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ImportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_import_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - client.import_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ImportDataRequest() - -@pytest.mark.asyncio -async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=service.ImportDataRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ImportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_import_data_async_from_dict(): - await test_import_data_async(request_type=dict) - - -def test_import_data_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ImportDataRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_import_data_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ImportDataRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_import_data_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.import_data( - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].input_config - mock_val = io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) - assert arg == mock_val - - -def test_import_data_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.import_data( - service.ImportDataRequest(), - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - -@pytest.mark.asyncio -async def test_import_data_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.import_data( - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].input_config - mock_val = io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_import_data_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.import_data( - service.ImportDataRequest(), - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - - -@pytest.mark.parametrize("request_type", [ - service.ExportDataRequest, - dict, -]) -def test_export_data(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - client.export_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportDataRequest() - -@pytest.mark.asyncio -async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=service.ExportDataRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_data_async_from_dict(): - await test_export_data_async(request_type=dict) - - -def test_export_data_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ExportDataRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_data_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = service.ExportDataRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_export_data_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_data( - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - - -def test_export_data_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_data( - service.ExportDataRequest(), - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - -@pytest.mark.asyncio -async def test_export_data_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_data( - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_export_data_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_data( - service.ExportDataRequest(), - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetAnnotationSpecRequest, - dict, -]) -def test_get_annotation_spec(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - example_count=1396, - ) - response = client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.example_count == 1396 - - -def test_get_annotation_spec_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - client.get_annotation_spec() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetAnnotationSpecRequest() - -@pytest.mark.asyncio -async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=service.GetAnnotationSpecRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - example_count=1396, - )) - response = await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.example_count == 1396 - - -@pytest.mark.asyncio -async def test_get_annotation_spec_async_from_dict(): - await test_get_annotation_spec_async(request_type=dict) - - -def test_get_annotation_spec_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetAnnotationSpecRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = annotation_spec.AnnotationSpec() - client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_annotation_spec_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetAnnotationSpecRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_annotation_spec_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_annotation_spec_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_annotation_spec( - service.GetAnnotationSpecRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_annotation_spec( - service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetTableSpecRequest, - dict, -]) -def test_get_table_spec(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table_spec.TableSpec( - name='name_value', - time_column_spec_id='time_column_spec_id_value', - row_count=992, - valid_row_count=1615, - column_count=1302, - etag='etag_value', - ) - response = client.get_table_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetTableSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, table_spec.TableSpec) - assert response.name == 'name_value' - assert response.time_column_spec_id == 'time_column_spec_id_value' - assert response.row_count == 992 - assert response.valid_row_count == 1615 - assert response.column_count == 1302 - assert response.etag == 'etag_value' - - -def test_get_table_spec_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table_spec), - '__call__') as call: - client.get_table_spec() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetTableSpecRequest() - -@pytest.mark.asyncio -async def test_get_table_spec_async(transport: str = 'grpc_asyncio', request_type=service.GetTableSpecRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(table_spec.TableSpec( - name='name_value', - time_column_spec_id='time_column_spec_id_value', - row_count=992, - valid_row_count=1615, - column_count=1302, - etag='etag_value', - )) - response = await client.get_table_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetTableSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, table_spec.TableSpec) - assert response.name == 'name_value' - assert response.time_column_spec_id == 'time_column_spec_id_value' - assert response.row_count == 992 - assert response.valid_row_count == 1615 - assert response.column_count == 1302 - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_table_spec_async_from_dict(): - await test_get_table_spec_async(request_type=dict) - - -def test_get_table_spec_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetTableSpecRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table_spec), - '__call__') as call: - call.return_value = table_spec.TableSpec() - client.get_table_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_table_spec_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetTableSpecRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_table_spec), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table_spec.TableSpec()) - await client.get_table_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_table_spec_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table_spec.TableSpec() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_table_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_table_spec_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_table_spec( - service.GetTableSpecRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_table_spec_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table_spec.TableSpec() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table_spec.TableSpec()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_table_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_table_spec_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_table_spec( - service.GetTableSpecRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListTableSpecsRequest, - dict, -]) -def test_list_table_specs(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_table_specs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListTableSpecsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_table_specs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListTableSpecsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTableSpecsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_table_specs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_table_specs), - '__call__') as call: - client.list_table_specs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListTableSpecsRequest() - -@pytest.mark.asyncio -async def test_list_table_specs_async(transport: str = 'grpc_asyncio', request_type=service.ListTableSpecsRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_table_specs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListTableSpecsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_table_specs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListTableSpecsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTableSpecsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_table_specs_async_from_dict(): - await test_list_table_specs_async(request_type=dict) - - -def test_list_table_specs_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListTableSpecsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_table_specs), - '__call__') as call: - call.return_value = service.ListTableSpecsResponse() - client.list_table_specs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_table_specs_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListTableSpecsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_table_specs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListTableSpecsResponse()) - await client.list_table_specs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_table_specs_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_table_specs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListTableSpecsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_table_specs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_table_specs_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_table_specs( - service.ListTableSpecsRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_table_specs_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_table_specs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListTableSpecsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListTableSpecsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_table_specs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_table_specs_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_table_specs( - service.ListTableSpecsRequest(), - parent='parent_value', - ) - - -def test_list_table_specs_pager(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_table_specs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - table_spec.TableSpec(), - table_spec.TableSpec(), - ], - next_page_token='abc', - ), - service.ListTableSpecsResponse( - table_specs=[], - next_page_token='def', - ), - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - ], - next_page_token='ghi', - ), - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - table_spec.TableSpec(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_table_specs(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table_spec.TableSpec) - for i in results) -def test_list_table_specs_pages(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_table_specs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - table_spec.TableSpec(), - table_spec.TableSpec(), - ], - next_page_token='abc', - ), - service.ListTableSpecsResponse( - table_specs=[], - next_page_token='def', - ), - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - ], - next_page_token='ghi', - ), - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - table_spec.TableSpec(), - ], - ), - RuntimeError, - ) - pages = list(client.list_table_specs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_table_specs_async_pager(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_table_specs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - table_spec.TableSpec(), - table_spec.TableSpec(), - ], - next_page_token='abc', - ), - service.ListTableSpecsResponse( - table_specs=[], - next_page_token='def', - ), - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - ], - next_page_token='ghi', - ), - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - table_spec.TableSpec(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_table_specs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, table_spec.TableSpec) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_table_specs_async_pages(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_table_specs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - table_spec.TableSpec(), - table_spec.TableSpec(), - ], - next_page_token='abc', - ), - service.ListTableSpecsResponse( - table_specs=[], - next_page_token='def', - ), - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - ], - next_page_token='ghi', - ), - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - table_spec.TableSpec(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_table_specs(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize("request_type", [ - service.UpdateTableSpecRequest, - dict, -]) -def test_update_table_spec(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_table_spec.TableSpec( - name='name_value', - time_column_spec_id='time_column_spec_id_value', - row_count=992, - valid_row_count=1615, - column_count=1302, - etag='etag_value', - ) - response = client.update_table_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateTableSpecRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_table_spec.TableSpec) - assert response.name == 'name_value' - assert response.time_column_spec_id == 'time_column_spec_id_value' - assert response.row_count == 992 - assert response.valid_row_count == 1615 - assert response.column_count == 1302 - assert response.etag == 'etag_value' - - -def test_update_table_spec_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table_spec), - '__call__') as call: - client.update_table_spec() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateTableSpecRequest() - -@pytest.mark.asyncio -async def test_update_table_spec_async(transport: str = 'grpc_asyncio', request_type=service.UpdateTableSpecRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_table_spec.TableSpec( - name='name_value', - time_column_spec_id='time_column_spec_id_value', - row_count=992, - valid_row_count=1615, - column_count=1302, - etag='etag_value', - )) - response = await client.update_table_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateTableSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_table_spec.TableSpec) - assert response.name == 'name_value' - assert response.time_column_spec_id == 'time_column_spec_id_value' - assert response.row_count == 992 - assert response.valid_row_count == 1615 - assert response.column_count == 1302 - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_table_spec_async_from_dict(): - await test_update_table_spec_async(request_type=dict) - - -def test_update_table_spec_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UpdateTableSpecRequest() - - request.table_spec.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table_spec), - '__call__') as call: - call.return_value = gca_table_spec.TableSpec() - client.update_table_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'table_spec.name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_table_spec_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UpdateTableSpecRequest() - - request.table_spec.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table_spec), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_table_spec.TableSpec()) - await client.update_table_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'table_spec.name=name_value', - ) in kw['metadata'] - - -def test_update_table_spec_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_table_spec.TableSpec() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_table_spec( - table_spec=gca_table_spec.TableSpec(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table_spec - mock_val = gca_table_spec.TableSpec(name='name_value') - assert arg == mock_val - - -def test_update_table_spec_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_table_spec( - service.UpdateTableSpecRequest(), - table_spec=gca_table_spec.TableSpec(name='name_value'), - ) - -@pytest.mark.asyncio -async def test_update_table_spec_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_table_spec.TableSpec() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_table_spec.TableSpec()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_table_spec( - table_spec=gca_table_spec.TableSpec(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table_spec - mock_val = gca_table_spec.TableSpec(name='name_value') - assert arg == mock_val - -@pytest.mark.asyncio -async def test_update_table_spec_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_table_spec( - service.UpdateTableSpecRequest(), - table_spec=gca_table_spec.TableSpec(name='name_value'), - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetColumnSpecRequest, - dict, -]) -def test_get_column_spec(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_column_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = column_spec.ColumnSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - ) - response = client.get_column_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetColumnSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, column_spec.ColumnSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -def test_get_column_spec_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_column_spec), - '__call__') as call: - client.get_column_spec() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetColumnSpecRequest() - -@pytest.mark.asyncio -async def test_get_column_spec_async(transport: str = 'grpc_asyncio', request_type=service.GetColumnSpecRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_column_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(column_spec.ColumnSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - )) - response = await client.get_column_spec(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetColumnSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, column_spec.ColumnSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_column_spec_async_from_dict(): - await test_get_column_spec_async(request_type=dict) - - -def test_get_column_spec_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetColumnSpecRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_column_spec), - '__call__') as call: - call.return_value = column_spec.ColumnSpec() - client.get_column_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_column_spec_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetColumnSpecRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_column_spec), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(column_spec.ColumnSpec()) - await client.get_column_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_column_spec_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_column_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = column_spec.ColumnSpec() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_column_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_column_spec_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_column_spec( - service.GetColumnSpecRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_column_spec_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_column_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = column_spec.ColumnSpec() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(column_spec.ColumnSpec()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_column_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_column_spec_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_column_spec( - service.GetColumnSpecRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListColumnSpecsRequest, - dict, -]) -def test_list_column_specs(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_column_specs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListColumnSpecsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_column_specs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListColumnSpecsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListColumnSpecsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_column_specs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_column_specs), - '__call__') as call: - client.list_column_specs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListColumnSpecsRequest() - -@pytest.mark.asyncio -async def test_list_column_specs_async(transport: str = 'grpc_asyncio', request_type=service.ListColumnSpecsRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_column_specs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListColumnSpecsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_column_specs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListColumnSpecsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListColumnSpecsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_column_specs_async_from_dict(): - await test_list_column_specs_async(request_type=dict) - - -def test_list_column_specs_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListColumnSpecsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_column_specs), - '__call__') as call: - call.return_value = service.ListColumnSpecsResponse() - client.list_column_specs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_column_specs_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListColumnSpecsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_column_specs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListColumnSpecsResponse()) - await client.list_column_specs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_column_specs_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_column_specs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListColumnSpecsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_column_specs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_column_specs_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_column_specs( - service.ListColumnSpecsRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_column_specs_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_column_specs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListColumnSpecsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListColumnSpecsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_column_specs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_column_specs_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_column_specs( - service.ListColumnSpecsRequest(), - parent='parent_value', - ) - - -def test_list_column_specs_pager(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_column_specs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - ], - next_page_token='abc', - ), - service.ListColumnSpecsResponse( - column_specs=[], - next_page_token='def', - ), - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - ], - next_page_token='ghi', - ), - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_column_specs(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, column_spec.ColumnSpec) - for i in results) -def test_list_column_specs_pages(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_column_specs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - ], - next_page_token='abc', - ), - service.ListColumnSpecsResponse( - column_specs=[], - next_page_token='def', - ), - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - ], - next_page_token='ghi', - ), - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - ], - ), - RuntimeError, - ) - pages = list(client.list_column_specs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_column_specs_async_pager(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_column_specs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - ], - next_page_token='abc', - ), - service.ListColumnSpecsResponse( - column_specs=[], - next_page_token='def', - ), - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - ], - next_page_token='ghi', - ), - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_column_specs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, column_spec.ColumnSpec) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_column_specs_async_pages(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_column_specs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - ], - next_page_token='abc', - ), - service.ListColumnSpecsResponse( - column_specs=[], - next_page_token='def', - ), - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - ], - next_page_token='ghi', - ), - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_column_specs(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize("request_type", [ - service.UpdateColumnSpecRequest, - dict, -]) -def test_update_column_spec(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_column_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_column_spec.ColumnSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - ) - response = client.update_column_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateColumnSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_column_spec.ColumnSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -def test_update_column_spec_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_column_spec), - '__call__') as call: - client.update_column_spec() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateColumnSpecRequest() - -@pytest.mark.asyncio -async def test_update_column_spec_async(transport: str = 'grpc_asyncio', request_type=service.UpdateColumnSpecRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_column_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_column_spec.ColumnSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - )) - response = await client.update_column_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.UpdateColumnSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_column_spec.ColumnSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_column_spec_async_from_dict(): - await test_update_column_spec_async(request_type=dict) - - -def test_update_column_spec_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UpdateColumnSpecRequest() - - request.column_spec.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_column_spec), - '__call__') as call: - call.return_value = gca_column_spec.ColumnSpec() - client.update_column_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'column_spec.name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_column_spec_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UpdateColumnSpecRequest() - - request.column_spec.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_column_spec), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_column_spec.ColumnSpec()) - await client.update_column_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'column_spec.name=name_value', - ) in kw['metadata'] - - -def test_update_column_spec_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_column_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_column_spec.ColumnSpec() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.update_column_spec( - column_spec=gca_column_spec.ColumnSpec(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].column_spec - mock_val = gca_column_spec.ColumnSpec(name='name_value') - assert arg == mock_val - - -def test_update_column_spec_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_column_spec( - service.UpdateColumnSpecRequest(), - column_spec=gca_column_spec.ColumnSpec(name='name_value'), - ) - -@pytest.mark.asyncio -async def test_update_column_spec_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_column_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_column_spec.ColumnSpec() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_column_spec.ColumnSpec()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_column_spec( - column_spec=gca_column_spec.ColumnSpec(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].column_spec - mock_val = gca_column_spec.ColumnSpec(name='name_value') - assert arg == mock_val - -@pytest.mark.asyncio -async def test_update_column_spec_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_column_spec( - service.UpdateColumnSpecRequest(), - column_spec=gca_column_spec.ColumnSpec(name='name_value'), - ) - - -@pytest.mark.parametrize("request_type", [ - service.CreateModelRequest, - dict, -]) -def test_create_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - client.create_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateModelRequest() - -@pytest.mark.asyncio -async def test_create_model_async(transport: str = 'grpc_asyncio', request_type=service.CreateModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.CreateModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_model_async_from_dict(): - await test_create_model_async(request_type=dict) - - -def test_create_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.CreateModelRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.CreateModelRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_create_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_model( - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model - mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) - assert arg == mock_val - - -def test_create_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_model( - service.CreateModelRequest(), - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - -@pytest.mark.asyncio -async def test_create_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_model( - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model - mock_val = gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_create_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_model( - service.CreateModelRequest(), - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetModelRequest, - dict, -]) -def test_get_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model( - name='name_value', - display_name='display_name_value', - dataset_id='dataset_id_value', - deployment_state=model.Model.DeploymentState.DEPLOYED, - ) - response = client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.dataset_id == 'dataset_id_value' - assert response.deployment_state == model.Model.DeploymentState.DEPLOYED - - -def test_get_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - client.get_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelRequest() - -@pytest.mark.asyncio -async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=service.GetModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model.Model( - name='name_value', - display_name='display_name_value', - dataset_id='dataset_id_value', - deployment_state=model.Model.DeploymentState.DEPLOYED, - )) - response = await client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.dataset_id == 'dataset_id_value' - assert response.deployment_state == model.Model.DeploymentState.DEPLOYED - - -@pytest.mark.asyncio -async def test_get_model_async_from_dict(): - await test_get_model_async(request_type=dict) - - -def test_get_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = model.Model() - client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - await client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model( - service.GetModelRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model( - service.GetModelRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListModelsRequest, - dict, -]) -def test_list_models(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListModelsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListModelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_models_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - client.list_models() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListModelsRequest() - -@pytest.mark.asyncio -async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=service.ListModelsRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ListModelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_models_async_from_dict(): - await test_list_models_async(request_type=dict) - - -def test_list_models_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListModelsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - call.return_value = service.ListModelsResponse() - client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_models_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ListModelsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse()) - await client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_models_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListModelsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_models( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_models_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_models( - service.ListModelsRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_models_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = service.ListModelsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_models( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_models_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_models( - service.ListModelsRequest(), - parent='parent_value', - ) - - -def test_list_models_pager(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - service.ListModelsResponse( - model=[], - next_page_token='def', - ), - service.ListModelsResponse( - model=[ - model.Model(), - ], - next_page_token='ghi', - ), - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_models(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, model.Model) - for i in results) -def test_list_models_pages(transport_name: str = "grpc"): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - service.ListModelsResponse( - model=[], - next_page_token='def', - ), - service.ListModelsResponse( - model=[ - model.Model(), - ], - next_page_token='ghi', - ), - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - pages = list(client.list_models(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_models_async_pager(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - service.ListModelsResponse( - model=[], - next_page_token='def', - ), - service.ListModelsResponse( - model=[ - model.Model(), - ], - next_page_token='ghi', - ), - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_models(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model.Model) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_models_async_pages(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - service.ListModelsResponse( - model=[], - next_page_token='def', - ), - service.ListModelsResponse( - model=[ - model.Model(), - ], - next_page_token='ghi', - ), - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_models(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize("request_type", [ - service.DeleteModelRequest, - dict, -]) -def test_delete_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - client.delete_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteModelRequest() - -@pytest.mark.asyncio -async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=service.DeleteModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeleteModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_model_async_from_dict(): - await test_delete_model_async(request_type=dict) - - -def test_delete_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeleteModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeleteModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_delete_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_model( - service.DeleteModelRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_delete_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_delete_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_model( - service.DeleteModelRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.DeployModelRequest, - dict, -]) -def test_deploy_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_deploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - client.deploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeployModelRequest() - -@pytest.mark.asyncio -async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=service.DeployModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.DeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_deploy_model_async_from_dict(): - await test_deploy_model_async(request_type=dict) - - -def test_deploy_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.DeployModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_deploy_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = service.DeployModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_deploy_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.deploy_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_deploy_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.deploy_model( - service.DeployModelRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_deploy_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.deploy_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_deploy_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.deploy_model( - service.DeployModelRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.UndeployModelRequest, - dict, -]) -def test_undeploy_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.UndeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_undeploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - client.undeploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.UndeployModelRequest() - -@pytest.mark.asyncio -async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=service.UndeployModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.UndeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undeploy_model_async_from_dict(): - await test_undeploy_model_async(request_type=dict) - - -def test_undeploy_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UndeployModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_undeploy_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.UndeployModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_undeploy_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.undeploy_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_undeploy_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.undeploy_model( - service.UndeployModelRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_undeploy_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.undeploy_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_undeploy_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.undeploy_model( - service.UndeployModelRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - service.ExportModelRequest, - dict, -]) -def test_export_model(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - client.export_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportModelRequest() - -@pytest.mark.asyncio -async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=service.ExportModelRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_model_async_from_dict(): - await test_export_model_async(request_type=dict) - - -def test_export_model_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ExportModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_model_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ExportModelRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_export_model_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_model( - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - - -def test_export_model_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.export_model( - service.ExportModelRequest(), - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - -@pytest.mark.asyncio -async def test_export_model_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_model( - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_export_model_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_model( - service.ExportModelRequest(), - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -@pytest.mark.parametrize("request_type", [ - service.ExportEvaluatedExamplesRequest, - dict, -]) -def test_export_evaluated_examples(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_evaluated_examples), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_evaluated_examples(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportEvaluatedExamplesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_evaluated_examples_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_evaluated_examples), - '__call__') as call: - client.export_evaluated_examples() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportEvaluatedExamplesRequest() - -@pytest.mark.asyncio -async def test_export_evaluated_examples_async(transport: str = 'grpc_asyncio', request_type=service.ExportEvaluatedExamplesRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_evaluated_examples), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_evaluated_examples(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.ExportEvaluatedExamplesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_evaluated_examples_async_from_dict(): - await test_export_evaluated_examples_async(request_type=dict) - - -def test_export_evaluated_examples_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ExportEvaluatedExamplesRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_evaluated_examples), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_evaluated_examples(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_evaluated_examples_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.ExportEvaluatedExamplesRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_evaluated_examples), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_evaluated_examples(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_export_evaluated_examples_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_evaluated_examples), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_evaluated_examples( - name='name_value', - output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')) - assert arg == mock_val - - -def test_export_evaluated_examples_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_evaluated_examples( - service.ExportEvaluatedExamplesRequest(), - name='name_value', - output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), - ) - -@pytest.mark.asyncio -async def test_export_evaluated_examples_flattened_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_evaluated_examples), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_evaluated_examples( - name='name_value', - output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_export_evaluated_examples_flattened_error_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.export_evaluated_examples( - service.ExportEvaluatedExamplesRequest(), - name='name_value', - output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetModelEvaluationRequest, - dict, -]) -def test_get_model_evaluation(request_type, transport: str = 'grpc'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation( - name='name_value', - annotation_spec_id='annotation_spec_id_value', - display_name='display_name_value', - evaluated_example_count=2446, - ) - response = client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelEvaluationRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation.ModelEvaluation) - assert response.name == 'name_value' - assert response.annotation_spec_id == 'annotation_spec_id_value' - assert response.display_name == 'display_name_value' - assert response.evaluated_example_count == 2446 - - -def test_get_model_evaluation_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - client.get_model_evaluation() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelEvaluationRequest() - -@pytest.mark.asyncio -async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=service.GetModelEvaluationRequest): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation( - name='name_value', - annotation_spec_id='annotation_spec_id_value', - display_name='display_name_value', - evaluated_example_count=2446, - )) - response = await client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == service.GetModelEvaluationRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, model_evaluation.ModelEvaluation) - assert response.name == 'name_value' - assert response.annotation_spec_id == 'annotation_spec_id_value' - assert response.display_name == 'display_name_value' - assert response.evaluated_example_count == 2446 - - -@pytest.mark.asyncio -async def test_get_model_evaluation_async_from_dict(): - await test_get_model_evaluation_async(request_type=dict) - - -def test_get_model_evaluation_field_headers(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetModelEvaluationRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - call.return_value = model_evaluation.ModelEvaluation() - client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_evaluation_field_headers_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = service.GetModelEvaluationRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) - await client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_model_evaluation_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model_evaluation( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_evaluation_flattened_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        client.get_model_evaluation(
-            service.GetModelEvaluationRequest(),
-            name='name_value',
-        )
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_flattened_async():
-    client = AutoMlAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = model_evaluation.ModelEvaluation()
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_model_evaluation(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        arg = args[0].name
-        mock_val = 'name_value'
-        assert arg == mock_val
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_flattened_error_async():
-    client = AutoMlAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_model_evaluation(
-            service.GetModelEvaluationRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.parametrize("request_type", [
-  service.ListModelEvaluationsRequest,
-  dict,
-])
-def test_list_model_evaluations(request_type, transport: str = 'grpc'):
-    client = AutoMlClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = service.ListModelEvaluationsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_model_evaluations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == service.ListModelEvaluationsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListModelEvaluationsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_model_evaluations_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = AutoMlClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        client.list_model_evaluations()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == service.ListModelEvaluationsRequest()
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=service.ListModelEvaluationsRequest):
-    client = AutoMlAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_model_evaluations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == service.ListModelEvaluationsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListModelEvaluationsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_async_from_dict():
-    await test_list_model_evaluations_async(request_type=dict)
-
-
-def test_list_model_evaluations_field_headers():
-    client = AutoMlClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = service.ListModelEvaluationsRequest()
-
-    request.parent = 'parent_value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        call.return_value = service.ListModelEvaluationsResponse()
-        client.list_model_evaluations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent_value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_field_headers_async():
-    client = AutoMlAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = service.ListModelEvaluationsRequest()
-
-    request.parent = 'parent_value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse())
-        await client.list_model_evaluations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent_value',
-    ) in kw['metadata']
-
-
-def test_list_model_evaluations_flattened():
-    client = AutoMlClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = service.ListModelEvaluationsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_model_evaluations(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        arg = args[0].parent
-        mock_val = 'parent_value'
-        assert arg == mock_val
-
-
-def test_list_model_evaluations_flattened_error():
-    client = AutoMlClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_model_evaluations(
-            service.ListModelEvaluationsRequest(),
-            parent='parent_value',
-        )
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_flattened_async():
-    client = AutoMlAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = service.ListModelEvaluationsResponse()
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service.ListModelEvaluationsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_model_evaluations(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        arg = args[0].parent
-        mock_val = 'parent_value'
-        assert arg == mock_val
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_flattened_error_async():
-    client = AutoMlAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_model_evaluations(
-            service.ListModelEvaluationsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_model_evaluations_pager(transport_name: str = "grpc"):
-    client = AutoMlClient(
-        credentials=ga_credentials.AnonymousCredentials,
-        transport=transport_name,
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            service.ListModelEvaluationsResponse(
-                model_evaluation=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='abc',
-            ),
-            service.ListModelEvaluationsResponse(
-                model_evaluation=[],
-                next_page_token='def',
-            ),
-            service.ListModelEvaluationsResponse(
-                model_evaluation=[
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='ghi',
-            ),
-            service.ListModelEvaluationsResponse(
-                model_evaluation=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_model_evaluations(request={})
-
-        assert pager._metadata == metadata
-
-        results = list(pager)
-        assert len(results) == 6
-        assert all(isinstance(i, model_evaluation.ModelEvaluation)
-                   for i in results)
-def test_list_model_evaluations_pages(transport_name: str = "grpc"):
-    client = AutoMlClient(
-        credentials=ga_credentials.AnonymousCredentials,
-        transport=transport_name,
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            service.ListModelEvaluationsResponse(
-                model_evaluation=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='abc',
-            ),
-            service.ListModelEvaluationsResponse(
-                model_evaluation=[],
-                next_page_token='def',
-            ),
-            service.ListModelEvaluationsResponse(
-                model_evaluation=[
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='ghi',
-            ),
-            service.ListModelEvaluationsResponse(
-                model_evaluation=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_model_evaluations(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_async_pager():
-    client = AutoMlAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials,
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
- call.side_effect = ( - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[], - next_page_token='def', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_evaluations(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_pages(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[], - next_page_token='def', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_model_evaluations(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - service.CreateDatasetRequest, - dict, -]) -def test_create_dataset_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request_init["dataset"] = {'translation_dataset_metadata': {'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_dataset_metadata': {'classification_type': 1}, 'text_classification_dataset_metadata': {'classification_type': 1}, 'image_object_detection_dataset_metadata': {}, 'video_classification_dataset_metadata': {}, 'video_object_tracking_dataset_metadata': {}, 'text_extraction_dataset_metadata': {}, 'text_sentiment_dataset_metadata': {'sentiment_max': 1404}, 'tables_dataset_metadata': {'primary_table_spec_id': 'primary_table_spec_id_value', 'target_column_spec_id': 'target_column_spec_id_value', 'weight_column_spec_id': 'weight_column_spec_id_value', 'ml_use_column_spec_id': 
'ml_use_column_spec_id_value', 'target_column_correlations': {}, 'stats_update_time': {'seconds': 751, 'nanos': 543}}, 'name': 'name_value', 'display_name': 'display_name_value', 'description': 'description_value', 'example_count': 1396, 'create_time': {}, 'etag': 'etag_value'} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = service.CreateDatasetRequest.meta.fields["dataset"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["dataset"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["dataset"][field])): - del request_init["dataset"][field][i][subfield] - else: - del request_init["dataset"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.create_dataset(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -def test_create_dataset_rest_required_fields(request_type=service.CreateDatasetRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = gca_dataset.Dataset() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gca_dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.create_dataset(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_create_dataset_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.create_dataset._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("parent", "dataset", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_dataset_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_create_dataset") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_create_dataset") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.CreateDatasetRequest.pb(service.CreateDatasetRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = gca_dataset.Dataset.to_json(gca_dataset.Dataset()) - - request = service.CreateDatasetRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = gca_dataset.Dataset() - - client.create_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_dataset_rest_bad_request(transport: str = 'rest', request_type=service.CreateDatasetRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_dataset(request) - - -def test_create_dataset_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_dataset.Dataset() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.create_dataset(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*}/datasets" % client.transport._host, args[1]) - - -def test_create_dataset_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_dataset( - service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - -def test_create_dataset_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetDatasetRequest, - dict, -]) -def test_get_dataset_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_dataset(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -def test_get_dataset_rest_required_fields(request_type=service.GetDatasetRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = dataset.Dataset() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_dataset(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_dataset_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.get_dataset._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_dataset_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_get_dataset") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_dataset") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.GetDatasetRequest.pb(service.GetDatasetRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = dataset.Dataset.to_json(dataset.Dataset()) - - request = service.GetDatasetRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = dataset.Dataset() - - client.get_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_dataset_rest_bad_request(transport: str = 'rest', request_type=service.GetDatasetRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_dataset(request) - - -def test_get_dataset_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = dataset.Dataset() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_dataset(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) - - -def test_get_dataset_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_dataset( - service.GetDatasetRequest(), - name='name_value', - ) - - -def test_get_dataset_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListDatasetsRequest, - dict, -]) -def test_list_datasets_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListDatasetsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListDatasetsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_datasets(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListDatasetsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_datasets_rest_required_fields(request_type=service.ListDatasetsRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_datasets._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_datasets._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = service.ListDatasetsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = service.ListDatasetsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_datasets(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_datasets_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.list_datasets._get_unset_required_fields({}) - assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_datasets_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_list_datasets") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_datasets") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ListDatasetsRequest.pb(service.ListDatasetsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = service.ListDatasetsResponse.to_json(service.ListDatasetsResponse()) - - request = service.ListDatasetsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = service.ListDatasetsResponse() - - client.list_datasets(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_datasets_rest_bad_request(transport: str = 'rest', request_type=service.ListDatasetsRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_datasets(request) - - -def test_list_datasets_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListDatasetsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListDatasetsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_datasets(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*}/datasets" % client.transport._host, args[1]) - - -def test_list_datasets_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_datasets( - service.ListDatasetsRequest(), - parent='parent_value', - ) - - -def test_list_datasets_rest_pager(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(service.ListDatasetsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - pager = client.list_datasets(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in results) - - pages = list(client.list_datasets(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - service.UpdateDatasetRequest, - dict, -]) -def test_update_dataset_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} - request_init["dataset"] = {'translation_dataset_metadata': {'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_dataset_metadata': {'classification_type': 1}, 'text_classification_dataset_metadata': {'classification_type': 1}, 'image_object_detection_dataset_metadata': {}, 'video_classification_dataset_metadata': {}, 'video_object_tracking_dataset_metadata': {}, 'text_extraction_dataset_metadata': {}, 'text_sentiment_dataset_metadata': {'sentiment_max': 1404}, 'tables_dataset_metadata': {'primary_table_spec_id': 'primary_table_spec_id_value', 'target_column_spec_id': 'target_column_spec_id_value', 'weight_column_spec_id': 'weight_column_spec_id_value', 'ml_use_column_spec_id': 'ml_use_column_spec_id_value', 'target_column_correlations': {}, 'stats_update_time': {'seconds': 751, 'nanos': 543}}, 'name': 'projects/sample1/locations/sample2/datasets/sample3', 'display_name': 'display_name_value', 'description': 'description_value', 'example_count': 1396, 'create_time': {}, 'etag': 'etag_value'} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = service.UpdateDatasetRequest.meta.fields["dataset"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["dataset"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["dataset"][field])): - del request_init["dataset"][field][i][subfield] - else: - del request_init["dataset"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - example_count=1396, - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.update_dataset(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.example_count == 1396 - assert response.etag == 'etag_value' - - -def test_update_dataset_rest_required_fields(request_type=service.UpdateDatasetRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_dataset._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = gca_dataset.Dataset() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "patch", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gca_dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.update_dataset(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_update_dataset_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.update_dataset._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask", )) & set(("dataset", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_dataset_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_update_dataset") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_update_dataset") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.UpdateDatasetRequest.pb(service.UpdateDatasetRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = gca_dataset.Dataset.to_json(gca_dataset.Dataset()) - - request = service.UpdateDatasetRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = gca_dataset.Dataset() - - client.update_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_dataset_rest_bad_request(transport: str = 'rest', request_type=service.UpdateDatasetRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_dataset(request) - - -def test_update_dataset_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_dataset.Dataset() - - # get arguments that satisfy an http rule for this method - sample_request = {'dataset': {'name': 'projects/sample1/locations/sample2/datasets/sample3'}} - - # get truthy value for each flattened field - mock_args = dict( - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_dataset.Dataset.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.update_dataset(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) - - -def test_update_dataset_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_dataset( - service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(translation_dataset_metadata=translation.TranslationDatasetMetadata(source_language_code='source_language_code_value')), - ) - - -def test_update_dataset_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.DeleteDatasetRequest, - dict, -]) -def test_delete_dataset_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.delete_dataset(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_delete_dataset_rest_required_fields(request_type=service.DeleteDatasetRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_dataset._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "delete", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.delete_dataset(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_delete_dataset_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.delete_dataset._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_dataset_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_delete_dataset") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_delete_dataset") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.DeleteDatasetRequest.pb(service.DeleteDatasetRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.DeleteDatasetRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.delete_dataset(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_delete_dataset_rest_bad_request(transport: str = 'rest', request_type=service.DeleteDatasetRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_dataset(request) - - -def test_delete_dataset_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.delete_dataset(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*}" % client.transport._host, args[1]) - - -def test_delete_dataset_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_dataset( - service.DeleteDatasetRequest(), - name='name_value', - ) - - -def test_delete_dataset_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ImportDataRequest, - dict, -]) -def test_import_data_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.import_data(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_import_data_rest_required_fields(request_type=service.ImportDataRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).import_data._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).import_data._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.import_data(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_import_data_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.import_data._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "inputConfig", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_import_data_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_import_data") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_import_data") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ImportDataRequest.pb(service.ImportDataRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.ImportDataRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.import_data(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_import_data_rest_bad_request(transport: str = 'rest', request_type=service.ImportDataRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.import_data(request) - - -def test_import_data_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.import_data(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*}:importData" % client.transport._host, args[1]) - - -def test_import_data_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.import_data( - service.ImportDataRequest(), - name='name_value', - input_config=io.InputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - ) - - -def test_import_data_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ExportDataRequest, - dict, -]) -def test_export_data_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.export_data(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_export_data_rest_required_fields(request_type=service.ExportDataRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_data._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_data._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.export_data(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_export_data_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.export_data._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "outputConfig", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_export_data_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_export_data") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_export_data") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ExportDataRequest.pb(service.ExportDataRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.ExportDataRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.export_data(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_export_data_rest_bad_request(transport: str = 'rest', request_type=service.ExportDataRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.export_data(request) - - -def test_export_data_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.export_data(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*}:exportData" % client.transport._host, args[1]) - - -def test_export_data_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_data( - service.ExportDataRequest(), - name='name_value', - output_config=io.OutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -def test_export_data_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetAnnotationSpecRequest, - dict, -]) -def test_get_annotation_spec_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - example_count=1396, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = annotation_spec.AnnotationSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_annotation_spec(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.example_count == 1396 - - -def test_get_annotation_spec_rest_required_fields(request_type=service.GetAnnotationSpecRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_annotation_spec._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_annotation_spec._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = annotation_spec.AnnotationSpec() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = annotation_spec.AnnotationSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_annotation_spec(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_annotation_spec_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.get_annotation_spec._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_annotation_spec_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_get_annotation_spec") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_annotation_spec") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.GetAnnotationSpecRequest.pb(service.GetAnnotationSpecRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = annotation_spec.AnnotationSpec.to_json(annotation_spec.AnnotationSpec()) - - request = service.GetAnnotationSpecRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = annotation_spec.AnnotationSpec() - - client.get_annotation_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_annotation_spec_rest_bad_request(transport: str = 'rest', request_type=service.GetAnnotationSpecRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_annotation_spec(request) - - -def test_get_annotation_spec_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = annotation_spec.AnnotationSpec() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3/annotationSpecs/sample4'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = annotation_spec.AnnotationSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_annotation_spec(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}" % client.transport._host, args[1]) - - -def test_get_annotation_spec_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_annotation_spec( - service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -def test_get_annotation_spec_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetTableSpecRequest, - dict, -]) -def test_get_table_spec_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = table_spec.TableSpec( - name='name_value', - time_column_spec_id='time_column_spec_id_value', - row_count=992, - valid_row_count=1615, - column_count=1302, - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table_spec.TableSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_table_spec(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, table_spec.TableSpec) - assert response.name == 'name_value' - assert response.time_column_spec_id == 'time_column_spec_id_value' - assert response.row_count == 992 - assert response.valid_row_count == 1615 - assert response.column_count == 1302 - assert response.etag == 'etag_value' - - -def test_get_table_spec_rest_required_fields(request_type=service.GetTableSpecRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_table_spec._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_table_spec._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("field_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table_spec.TableSpec() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table_spec.TableSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_table_spec(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_table_spec_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.get_table_spec._get_unset_required_fields({}) - assert set(unset_fields) == (set(("fieldMask", )) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_table_spec_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_get_table_spec") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_table_spec") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.GetTableSpecRequest.pb(service.GetTableSpecRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table_spec.TableSpec.to_json(table_spec.TableSpec()) - - request = service.GetTableSpecRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table_spec.TableSpec() - - client.get_table_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_table_spec_rest_bad_request(transport: str = 'rest', request_type=service.GetTableSpecRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_table_spec(request) - - -def test_get_table_spec_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = table_spec.TableSpec() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table_spec.TableSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_table_spec(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}" % client.transport._host, args[1]) - - -def test_get_table_spec_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_table_spec( - service.GetTableSpecRequest(), - name='name_value', - ) - - -def test_get_table_spec_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListTableSpecsRequest, - dict, -]) -def test_list_table_specs_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListTableSpecsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListTableSpecsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_table_specs(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListTableSpecsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_table_specs_rest_required_fields(request_type=service.ListTableSpecsRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_table_specs._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_table_specs._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("field_mask", "filter", "page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = service.ListTableSpecsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = service.ListTableSpecsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_table_specs(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_table_specs_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.list_table_specs._get_unset_required_fields({}) - assert set(unset_fields) == (set(("fieldMask", "filter", "pageSize", "pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_table_specs_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_list_table_specs") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_table_specs") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ListTableSpecsRequest.pb(service.ListTableSpecsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = service.ListTableSpecsResponse.to_json(service.ListTableSpecsResponse()) - - request = service.ListTableSpecsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = service.ListTableSpecsResponse() - - client.list_table_specs(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_table_specs_rest_bad_request(transport: str = 'rest', request_type=service.ListTableSpecsRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2/datasets/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_table_specs(request) - - -def test_list_table_specs_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListTableSpecsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2/datasets/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListTableSpecsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_table_specs(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs" % client.transport._host, args[1]) - - -def test_list_table_specs_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_table_specs( - service.ListTableSpecsRequest(), - parent='parent_value', - ) - - -def test_list_table_specs_rest_pager(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - table_spec.TableSpec(), - table_spec.TableSpec(), - ], - next_page_token='abc', - ), - service.ListTableSpecsResponse( - table_specs=[], - next_page_token='def', - ), - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - ], - next_page_token='ghi', - ), - service.ListTableSpecsResponse( - table_specs=[ - table_spec.TableSpec(), - table_spec.TableSpec(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(service.ListTableSpecsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/locations/sample2/datasets/sample3'} - - pager = client.list_table_specs(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table_spec.TableSpec) - for i in results) - - pages = list(client.list_table_specs(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - service.UpdateTableSpecRequest, - dict, -]) -def test_update_table_spec_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'table_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'}} - request_init["table_spec"] = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4', 'time_column_spec_id': 'time_column_spec_id_value', 'row_count': 992, 'valid_row_count': 1615, 'column_count': 1302, 'input_configs': [{'gcs_source': {'input_uris': ['input_uris_value1', 'input_uris_value2']}, 'bigquery_source': {'input_uri': 'input_uri_value'}, 'params': {}}], 'etag': 'etag_value'} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = service.UpdateTableSpecRequest.meta.fields["table_spec"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["table_spec"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["table_spec"][field])): - del request_init["table_spec"][field][i][subfield] - else: - del request_init["table_spec"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_table_spec.TableSpec( - name='name_value', - time_column_spec_id='time_column_spec_id_value', - row_count=992, - valid_row_count=1615, - column_count=1302, - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_table_spec.TableSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.update_table_spec(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_table_spec.TableSpec) - assert response.name == 'name_value' - assert response.time_column_spec_id == 'time_column_spec_id_value' - assert response.row_count == 992 - assert response.valid_row_count == 1615 - assert response.column_count == 1302 - assert response.etag == 'etag_value' - - -def test_update_table_spec_rest_required_fields(request_type=service.UpdateTableSpecRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_table_spec._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_table_spec._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = gca_table_spec.TableSpec() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "patch", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gca_table_spec.TableSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.update_table_spec(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_update_table_spec_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.update_table_spec._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask", )) & set(("tableSpec", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_table_spec_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_update_table_spec") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_update_table_spec") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.UpdateTableSpecRequest.pb(service.UpdateTableSpecRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = gca_table_spec.TableSpec.to_json(gca_table_spec.TableSpec()) - - request = service.UpdateTableSpecRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = gca_table_spec.TableSpec() - - client.update_table_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_table_spec_rest_bad_request(transport: str = 'rest', request_type=service.UpdateTableSpecRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'table_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'}} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_table_spec(request) - - -def test_update_table_spec_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_table_spec.TableSpec() - - # get arguments that satisfy an http rule for this method - sample_request = {'table_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'}} - - # get truthy value for each flattened field - mock_args = dict( - table_spec=gca_table_spec.TableSpec(name='name_value'), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_table_spec.TableSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.update_table_spec(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}" % client.transport._host, args[1]) - - -def test_update_table_spec_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_table_spec( - service.UpdateTableSpecRequest(), - table_spec=gca_table_spec.TableSpec(name='name_value'), - ) - - -def test_update_table_spec_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetColumnSpecRequest, - dict, -]) -def test_get_column_spec_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = column_spec.ColumnSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = column_spec.ColumnSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_column_spec(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, column_spec.ColumnSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -def test_get_column_spec_rest_required_fields(request_type=service.GetColumnSpecRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_column_spec._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_column_spec._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("field_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = column_spec.ColumnSpec() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = column_spec.ColumnSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_column_spec(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_column_spec_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.get_column_spec._get_unset_required_fields({}) - assert set(unset_fields) == (set(("fieldMask", )) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_column_spec_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_get_column_spec") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_column_spec") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.GetColumnSpecRequest.pb(service.GetColumnSpecRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = column_spec.ColumnSpec.to_json(column_spec.ColumnSpec()) - - request = service.GetColumnSpecRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = column_spec.ColumnSpec() - - client.get_column_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_column_spec_rest_bad_request(transport: str = 'rest', request_type=service.GetColumnSpecRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_column_spec(request) - - -def test_get_column_spec_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = column_spec.ColumnSpec() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = column_spec.ColumnSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_column_spec(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}" % client.transport._host, args[1]) - - -def test_get_column_spec_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_column_spec( - service.GetColumnSpecRequest(), - name='name_value', - ) - - -def test_get_column_spec_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListColumnSpecsRequest, - dict, -]) -def test_list_column_specs_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListColumnSpecsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListColumnSpecsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_column_specs(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListColumnSpecsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_column_specs_rest_required_fields(request_type=service.ListColumnSpecsRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_column_specs._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_column_specs._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("field_mask", "filter", "page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = service.ListColumnSpecsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = service.ListColumnSpecsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_column_specs(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_column_specs_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.list_column_specs._get_unset_required_fields({}) - assert set(unset_fields) == (set(("fieldMask", "filter", "pageSize", "pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_column_specs_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_list_column_specs") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_column_specs") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ListColumnSpecsRequest.pb(service.ListColumnSpecsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = service.ListColumnSpecsResponse.to_json(service.ListColumnSpecsResponse()) - - request = service.ListColumnSpecsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = service.ListColumnSpecsResponse() - - client.list_column_specs(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_column_specs_rest_bad_request(transport: str = 'rest', request_type=service.ListColumnSpecsRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_column_specs(request) - - -def test_list_column_specs_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListColumnSpecsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListColumnSpecsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_column_specs(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs" % client.transport._host, args[1]) - - -def test_list_column_specs_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_column_specs( - service.ListColumnSpecsRequest(), - parent='parent_value', - ) - - -def test_list_column_specs_rest_pager(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - ], - next_page_token='abc', - ), - service.ListColumnSpecsResponse( - column_specs=[], - next_page_token='def', - ), - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - ], - next_page_token='ghi', - ), - service.ListColumnSpecsResponse( - column_specs=[ - column_spec.ColumnSpec(), - column_spec.ColumnSpec(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(service.ListColumnSpecsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4'} - - pager = client.list_column_specs(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, column_spec.ColumnSpec) - for i in results) - - pages = list(client.list_column_specs(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - service.UpdateColumnSpecRequest, - dict, -]) -def test_update_column_spec_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'column_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'}} - request_init["column_spec"] = {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5', 'data_type': {'list_element_type': {}, 'struct_type': {'fields': {}}, 'time_format': 'time_format_value', 'type_code': 3, 'nullable': True}, 'display_name': 'display_name_value', 'data_stats': {'float64_stats': {'mean': 0.417, 'standard_deviation': 0.1907, 'quantiles': [0.983, 0.984], 'histogram_buckets': [{'min_': 0.419, 'max_': 0.421, 'count': 553}]}, 'string_stats': {'top_unigram_stats': [{'value': 'value_value', 'count': 553}]}, 'timestamp_stats': {'granular_stats': {}}, 'array_stats': {'member_stats': {}}, 'struct_stats': {'field_stats': {}}, 'category_stats': {'top_category_stats': [{'value': 'value_value', 'count': 553}]}, 'distinct_value_count': 2150, 'null_value_count': 1727, 'valid_value_count': 1812}, 'top_correlated_columns': [{'column_spec_id': 'column_spec_id_value', 'correlation_stats': {'cramers_v': 0.962}}], 'etag': 'etag_value'} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = service.UpdateColumnSpecRequest.meta.fields["column_spec"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["column_spec"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["column_spec"][field])): - del request_init["column_spec"][field][i][subfield] - else: - del request_init["column_spec"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_column_spec.ColumnSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_column_spec.ColumnSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.update_column_spec(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_column_spec.ColumnSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -def test_update_column_spec_rest_required_fields(request_type=service.UpdateColumnSpecRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_column_spec._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_column_spec._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = gca_column_spec.ColumnSpec() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "patch", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gca_column_spec.ColumnSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.update_column_spec(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_update_column_spec_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.update_column_spec._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask", )) & set(("columnSpec", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_column_spec_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_update_column_spec") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_update_column_spec") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.UpdateColumnSpecRequest.pb(service.UpdateColumnSpecRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = gca_column_spec.ColumnSpec.to_json(gca_column_spec.ColumnSpec()) - - request = service.UpdateColumnSpecRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = gca_column_spec.ColumnSpec() - - client.update_column_spec(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_column_spec_rest_bad_request(transport: str = 'rest', request_type=service.UpdateColumnSpecRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'column_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'}} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_column_spec(request) - - -def test_update_column_spec_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gca_column_spec.ColumnSpec() - - # get arguments that satisfy an http rule for this method - sample_request = {'column_spec': {'name': 'projects/sample1/locations/sample2/datasets/sample3/tableSpecs/sample4/columnSpecs/sample5'}} - - # get truthy value for each flattened field - mock_args = dict( - column_spec=gca_column_spec.ColumnSpec(name='name_value'), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gca_column_spec.ColumnSpec.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.update_column_spec(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}" % client.transport._host, args[1]) - - -def test_update_column_spec_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_column_spec( - service.UpdateColumnSpecRequest(), - column_spec=gca_column_spec.ColumnSpec(name='name_value'), - ) - - -def test_update_column_spec_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.CreateModelRequest, - dict, -]) -def test_create_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request_init["model"] = {'translation_model_metadata': {'base_model': 'base_model_value', 'source_language_code': 'source_language_code_value', 'target_language_code': 'target_language_code_value'}, 'image_classification_model_metadata': {'base_model_id': 'base_model_id_value', 'train_budget': 1272, 'train_cost': 1078, 'stop_reason': 'stop_reason_value', 'model_type': 'model_type_value', 'node_qps': 0.857, 'node_count': 1070}, 'text_classification_model_metadata': {'classification_type': 1}, 'image_object_detection_model_metadata': {'model_type': 'model_type_value', 'node_count': 1070, 'node_qps': 0.857, 'stop_reason': 'stop_reason_value', 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881}, 'video_classification_model_metadata': {}, 'video_object_tracking_model_metadata': {}, 'text_extraction_model_metadata': {'model_hint': 'model_hint_value'}, 'tables_model_metadata': {'optimization_objective_recall_value': 0.37270000000000003, 'optimization_objective_precision_value': 0.4072, 'target_column_spec': {'name': 'name_value', 'data_type': {'list_element_type': {}, 'struct_type': {'fields': {}}, 'time_format': 'time_format_value', 'type_code': 3, 'nullable': True}, 'display_name': 'display_name_value', 'data_stats': {'float64_stats': {'mean': 0.417, 'standard_deviation': 0.1907, 'quantiles': [0.983, 0.984], 'histogram_buckets': [{'min_': 0.419, 'max_': 0.421, 'count': 553}]}, 'string_stats': {'top_unigram_stats': [{'value': 'value_value', 'count': 553}]}, 'timestamp_stats': {'granular_stats': {}}, 'array_stats': {'member_stats': {}}, 'struct_stats': {'field_stats': {}}, 'category_stats': {'top_category_stats': [{'value': 'value_value', 'count': 553}]}, 'distinct_value_count': 2150, 'null_value_count': 1727, 'valid_value_count': 1812}, 'top_correlated_columns': [{'column_spec_id': 'column_spec_id_value', 'correlation_stats': {'cramers_v': 0.962}}], 'etag': 'etag_value'}, 'input_feature_column_specs': {}, 'optimization_objective': 'optimization_objective_value', 'tables_model_column_info': [{'column_spec_name': 'column_spec_name_value', 'column_display_name': 'column_display_name_value', 'feature_importance': 0.1917}], 'train_budget_milli_node_hours': 3075, 'train_cost_milli_node_hours': 2881, 'disable_early_stopping': True}, 'text_sentiment_model_metadata': {}, 'name': 'name_value', 'display_name': 'display_name_value', 'dataset_id': 'dataset_id_value', 'create_time': {'seconds': 751, 'nanos': 543}, 'update_time': {}, 'deployment_state': 1} - # The version of a generated dependency at test runtime may differ from the version used during generation. 
- # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = service.CreateModelRequest.meta.fields["model"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["model"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["model"][field])): - del request_init["model"][field][i][subfield] - else: - del request_init["model"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.create_model(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_create_model_rest_required_fields(request_type=service.CreateModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
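            # Rough sketch of what is being faked here: at runtime path_template.transcode()
            # matches the request against the method's HTTP rule (the flattened test further
            # down validates the CreateModel URL against
            # "%s/v1beta1/{parent=projects/*/locations/*}/models") and returns a dict with
            # 'uri', 'method' and 'query_params', plus 'body' for rules that send one.
            # The hard-coded 'v1/sample_method' below is a stand-in for that output, not a
            # real AutoML endpoint.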
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.create_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_create_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.create_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("parent", "model", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_create_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_create_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.CreateModelRequest.pb(service.CreateModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.CreateModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_model_rest_bad_request(transport: str = 'rest', request_type=service.CreateModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_model(request) - - -def test_create_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.create_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*}/models" % client.transport._host, args[1]) - - -def test_create_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_model( - service.CreateModelRequest(), - parent='parent_value', - model=gca_model.Model(translation_model_metadata=translation.TranslationModelMetadata(base_model='base_model_value')), - ) - - -def test_create_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetModelRequest, - dict, -]) -def test_get_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = model.Model( - name='name_value', - display_name='display_name_value', - dataset_id='dataset_id_value', - deployment_state=model.Model.DeploymentState.DEPLOYED, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = model.Model.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_model(request) - - # Establish that the response is the type that we expect. 
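        # The JSON payload staged above was produced with model.Model.pb(...) followed by
        # MessageToJson, and the REST transport parses it back into a model.Model, which is
        # why the field-by-field asserts below line up with the designated return value.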
- assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.dataset_id == 'dataset_id_value' - assert response.deployment_state == model.Model.DeploymentState.DEPLOYED - - -def test_get_model_rest_required_fields(request_type=service.GetModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = model.Model() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = model.Model.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.get_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_get_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.GetModelRequest.pb(service.GetModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = model.Model.to_json(model.Model()) - - request = service.GetModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = model.Model() - - client.get_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_model_rest_bad_request(transport: str = 'rest', request_type=service.GetModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_model(request) - - -def test_get_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
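        # A note on the "flattened" call style exercised here: rather than constructing a
        # service.GetModelRequest explicitly, the test passes name='name_value' as a keyword
        # argument and lets the client assemble the request; the check at the end only
        # validates that the resulting URL matches
        # "%s/v1beta1/{name=projects/*/locations/*/models/*}" for this transport host.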
- return_value = model.Model() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = model.Model.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}" % client.transport._host, args[1]) - - -def test_get_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model( - service.GetModelRequest(), - name='name_value', - ) - - -def test_get_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListModelsRequest, - dict, -]) -def test_list_models_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListModelsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListModelsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_models(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListModelsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_models_rest_required_fields(request_type=service.ListModelsRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_models._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_models._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = service.ListModelsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = service.ListModelsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_models(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_models_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.list_models._get_unset_required_fields({}) - assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_models_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_list_models") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_models") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ListModelsRequest.pb(service.ListModelsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = service.ListModelsResponse.to_json(service.ListModelsResponse()) - - request = service.ListModelsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = service.ListModelsResponse() - - client.list_models(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_models_rest_bad_request(transport: str = 'rest', request_type=service.ListModelsRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_models(request) - - -def test_list_models_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListModelsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListModelsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_models(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*}/models" % client.transport._host, args[1]) - - -def test_list_models_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_models( - service.ListModelsRequest(), - parent='parent_value', - ) - - -def test_list_models_rest_pager(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
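        # Sketch of the paging behaviour exercised below: the mocked session returns four
        # ListModelsResponse pages (with 3, 0, 1 and 2 models) whose next_page_token values
        # are 'abc', 'def', 'ghi' and finally empty, so iterating the ListModelsPager yields
        # 6 Model objects and iterating .pages surfaces each raw page with its token.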
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - service.ListModelsResponse( - model=[], - next_page_token='def', - ), - service.ListModelsResponse( - model=[ - model.Model(), - ], - next_page_token='ghi', - ), - service.ListModelsResponse( - model=[ - model.Model(), - model.Model(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(service.ListModelsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/locations/sample2'} - - pager = client.list_models(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, model.Model) - for i in results) - - pages = list(client.list_models(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - service.DeleteModelRequest, - dict, -]) -def test_delete_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.delete_model(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_delete_model_rest_required_fields(request_type=service.DeleteModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "delete", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.delete_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_delete_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.delete_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_delete_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_delete_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.DeleteModelRequest.pb(service.DeleteModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.DeleteModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.delete_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_delete_model_rest_bad_request(transport: str = 'rest', request_type=service.DeleteModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_model(request) - - -def test_delete_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.delete_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}" % client.transport._host, args[1]) - - -def test_delete_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_model( - service.DeleteModelRequest(), - name='name_value', - ) - - -def test_delete_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.DeployModelRequest, - dict, -]) -def test_deploy_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.deploy_model(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_deploy_model_rest_required_fields(request_type=service.DeployModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).deploy_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).deploy_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.deploy_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_deploy_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.deploy_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_deploy_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_deploy_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_deploy_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.DeployModelRequest.pb(service.DeployModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.DeployModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.deploy_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_deploy_model_rest_bad_request(transport: str = 'rest', request_type=service.DeployModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.deploy_model(request) - - -def test_deploy_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.deploy_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:deploy" % client.transport._host, args[1]) - - -def test_deploy_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.deploy_model( - service.DeployModelRequest(), - name='name_value', - ) - - -def test_deploy_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.UndeployModelRequest, - dict, -]) -def test_undeploy_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.undeploy_model(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_undeploy_model_rest_required_fields(request_type=service.UndeployModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).undeploy_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).undeploy_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.undeploy_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_undeploy_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.undeploy_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_undeploy_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_undeploy_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_undeploy_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.UndeployModelRequest.pb(service.UndeployModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.UndeployModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.undeploy_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_undeploy_model_rest_bad_request(transport: str = 'rest', request_type=service.UndeployModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.undeploy_model(request) - - -def test_undeploy_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.undeploy_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:undeploy" % client.transport._host, args[1]) - - -def test_undeploy_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.undeploy_model( - service.UndeployModelRequest(), - name='name_value', - ) - - -def test_undeploy_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ExportModelRequest, - dict, -]) -def test_export_model_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.export_model(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_export_model_rest_required_fields(request_type=service.ExportModelRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_model._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.export_model(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_export_model_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.export_model._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "outputConfig", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_export_model_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_export_model") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_export_model") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ExportModelRequest.pb(service.ExportModelRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.ExportModelRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.export_model(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_export_model_rest_bad_request(transport: str = 'rest', request_type=service.ExportModelRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.export_model(request) - - -def test_export_model_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.export_model(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:export" % client.transport._host, args[1]) - - -def test_export_model_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_model( - service.ExportModelRequest(), - name='name_value', - output_config=io.ModelExportOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -def test_export_model_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ExportEvaluatedExamplesRequest, - dict, -]) -def test_export_evaluated_examples_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.export_evaluated_examples(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_export_evaluated_examples_rest_required_fields(request_type=service.ExportEvaluatedExamplesRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_evaluated_examples._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).export_evaluated_examples._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.export_evaluated_examples(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_export_evaluated_examples_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.export_evaluated_examples._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "outputConfig", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_export_evaluated_examples_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_export_evaluated_examples") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_export_evaluated_examples") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ExportEvaluatedExamplesRequest.pb(service.ExportEvaluatedExamplesRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = service.ExportEvaluatedExamplesRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.export_evaluated_examples(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_export_evaluated_examples_rest_bad_request(transport: str = 'rest', request_type=service.ExportEvaluatedExamplesRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.export_evaluated_examples(request) - - -def test_export_evaluated_examples_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.export_evaluated_examples(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples" % client.transport._host, args[1]) - - -def test_export_evaluated_examples_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_evaluated_examples( - service.ExportEvaluatedExamplesRequest(), - name='name_value', - output_config=io.ExportEvaluatedExamplesOutputConfig(bigquery_destination=io.BigQueryDestination(output_uri='output_uri_value')), - ) - - -def test_export_evaluated_examples_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.GetModelEvaluationRequest, - dict, -]) -def test_get_model_evaluation_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = model_evaluation.ModelEvaluation( - name='name_value', - annotation_spec_id='annotation_spec_id_value', - display_name='display_name_value', - evaluated_example_count=2446, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = model_evaluation.ModelEvaluation.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_model_evaluation(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation.ModelEvaluation) - assert response.name == 'name_value' - assert response.annotation_spec_id == 'annotation_spec_id_value' - assert response.display_name == 'display_name_value' - assert response.evaluated_example_count == 2446 - - -def test_get_model_evaluation_rest_required_fields(request_type=service.GetModelEvaluationRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model_evaluation._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_model_evaluation._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = model_evaluation.ModelEvaluation() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = model_evaluation.ModelEvaluation.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_model_evaluation(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_model_evaluation_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.get_model_evaluation._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_model_evaluation_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_get_model_evaluation") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_get_model_evaluation") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.GetModelEvaluationRequest.pb(service.GetModelEvaluationRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = model_evaluation.ModelEvaluation.to_json(model_evaluation.ModelEvaluation()) - - request = service.GetModelEvaluationRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = model_evaluation.ModelEvaluation() - - client.get_model_evaluation(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_model_evaluation_rest_bad_request(transport: str = 'rest', request_type=service.GetModelEvaluationRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_model_evaluation(request) - - -def test_get_model_evaluation_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = model_evaluation.ModelEvaluation() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3/modelEvaluations/sample4'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = model_evaluation.ModelEvaluation.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_model_evaluation(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}" % client.transport._host, args[1]) - - -def test_get_model_evaluation_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model_evaluation( - service.GetModelEvaluationRequest(), - name='name_value', - ) - - -def test_get_model_evaluation_rest_error(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - service.ListModelEvaluationsRequest, - dict, -]) -def test_list_model_evaluations_rest(request_type): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = service.ListModelEvaluationsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListModelEvaluationsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_model_evaluations(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_evaluations_rest_required_fields(request_type=service.ListModelEvaluationsRequest): - transport_class = transports.AutoMlRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_model_evaluations._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_model_evaluations._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("filter", "page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = service.ListModelEvaluationsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = service.ListModelEvaluationsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_model_evaluations(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_model_evaluations_rest_unset_required_fields(): - transport = transports.AutoMlRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.list_model_evaluations._get_unset_required_fields({}) - assert set(unset_fields) == (set(("filter", "pageSize", "pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_model_evaluations_rest_interceptors(null_interceptor): - transport = transports.AutoMlRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.AutoMlRestInterceptor(), - ) - client = AutoMlClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.AutoMlRestInterceptor, "post_list_model_evaluations") as post, \ - mock.patch.object(transports.AutoMlRestInterceptor, "pre_list_model_evaluations") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = service.ListModelEvaluationsRequest.pb(service.ListModelEvaluationsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = service.ListModelEvaluationsResponse.to_json(service.ListModelEvaluationsResponse()) - - request = service.ListModelEvaluationsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = service.ListModelEvaluationsResponse() - - client.list_model_evaluations(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_model_evaluations_rest_bad_request(transport: str = 'rest', request_type=service.ListModelEvaluationsRequest): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_model_evaluations(request) - - -def test_list_model_evaluations_rest_flattened(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = service.ListModelEvaluationsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = service.ListModelEvaluationsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_model_evaluations(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations" % client.transport._host, args[1]) - - -def test_list_model_evaluations_rest_flattened_error(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_model_evaluations( - service.ListModelEvaluationsRequest(), - parent='parent_value', - ) - - -def test_list_model_evaluations_rest_pager(transport: str = 'rest'): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[], - next_page_token='def', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - service.ListModelEvaluationsResponse( - model_evaluation=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(service.ListModelEvaluationsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/locations/sample2/models/sample3'} - - pager = client.list_model_evaluations(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in results) - - pages = list(client.list_model_evaluations(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoMlClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide an api_key and a transport instance. - transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = AutoMlClient( - client_options=options, - transport=transport, - ) - - # It is an error to provide an api_key and a credential. - options = mock.Mock() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = AutoMlClient( - client_options=options, - credentials=ga_credentials.AnonymousCredentials() - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = AutoMlClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = AutoMlClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.AutoMlGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.AutoMlGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.AutoMlGrpcTransport, - transports.AutoMlGrpcAsyncIOTransport, - transports.AutoMlRestTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "rest", -]) -def test_transport_kind(transport_name): - transport = AutoMlClient.get_transport_class(transport_name)( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert transport.kind == transport_name - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.AutoMlGrpcTransport, - ) - -def test_auto_ml_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.AutoMlTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_auto_ml_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.AutoMlTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_dataset', - 'get_dataset', - 'list_datasets', - 'update_dataset', - 'delete_dataset', - 'import_data', - 'export_data', - 'get_annotation_spec', - 'get_table_spec', - 'list_table_specs', - 'update_table_spec', - 'get_column_spec', - 'list_column_specs', - 'update_column_spec', - 'create_model', - 'get_model', - 'list_models', - 'delete_model', - 'deploy_model', - 'undeploy_model', - 'export_model', - 'export_evaluated_examples', - 'get_model_evaluation', - 'list_model_evaluations', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - # Catch all for all remaining methods and properties - remainder = [ - 'kind', - ] - for r in remainder: - with pytest.raises(NotImplementedError): - getattr(transport, r)() - - -def test_auto_ml_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.AutoMlTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_auto_ml_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.automl_v1beta1.services.auto_ml.transports.AutoMlTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.AutoMlTransport() - adc.assert_called_once() - - -def test_auto_ml_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - AutoMlClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.AutoMlGrpcTransport, - transports.AutoMlGrpcAsyncIOTransport, - ], -) -def test_auto_ml_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.AutoMlGrpcTransport, - transports.AutoMlGrpcAsyncIOTransport, - transports.AutoMlRestTransport, - ], -) -def test_auto_ml_transport_auth_gdch_credentials(transport_class): - host = 'https://language.com' - api_audience_tests = [None, 'https://language2.com'] - api_audience_expect = [host, 'https://language2.com'] - for t, e in zip(api_audience_tests, api_audience_expect): - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - gdch_mock = mock.MagicMock() - type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) - adc.return_value = (gdch_mock, None) - transport_class(host=host, api_audience=t) - gdch_mock.with_gdch_audience.assert_called_once_with( - e - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.AutoMlGrpcTransport, grpc_helpers), - (transports.AutoMlGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_auto_ml_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "automl.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="automl.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) -def test_auto_ml_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - -def test_auto_ml_http_transport_client_cert_source_for_mtls(): - cred = ga_credentials.AnonymousCredentials() - with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: - transports.AutoMlRestTransport ( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) - - -def test_auto_ml_rest_lro_client(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_auto_ml_host_no_port(transport_name): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com'), - transport=transport_name, - ) - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://automl.googleapis.com' - ) - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_auto_ml_host_with_port(transport_name): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com:8000'), - transport=transport_name, - ) - assert client.transport._host == ( - 'automl.googleapis.com:8000' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://automl.googleapis.com:8000' - ) - -@pytest.mark.parametrize("transport_name", [ - "rest", -]) -def test_auto_ml_client_transport_session_collision(transport_name): - creds1 = ga_credentials.AnonymousCredentials() - creds2 = ga_credentials.AnonymousCredentials() - client1 = AutoMlClient( - credentials=creds1, - transport=transport_name, - ) - client2 = AutoMlClient( - credentials=creds2, - transport=transport_name, - ) - session1 = client1.transport.create_dataset._session - session2 = client2.transport.create_dataset._session - assert session1 != session2 - session1 = client1.transport.get_dataset._session - session2 = client2.transport.get_dataset._session - assert session1 != session2 - session1 = client1.transport.list_datasets._session - session2 = client2.transport.list_datasets._session - assert session1 != session2 - session1 = client1.transport.update_dataset._session - session2 = client2.transport.update_dataset._session - assert session1 != session2 - session1 = client1.transport.delete_dataset._session - session2 = client2.transport.delete_dataset._session - assert session1 != session2 - session1 = client1.transport.import_data._session - session2 = client2.transport.import_data._session - assert session1 != session2 - 
session1 = client1.transport.export_data._session - session2 = client2.transport.export_data._session - assert session1 != session2 - session1 = client1.transport.get_annotation_spec._session - session2 = client2.transport.get_annotation_spec._session - assert session1 != session2 - session1 = client1.transport.get_table_spec._session - session2 = client2.transport.get_table_spec._session - assert session1 != session2 - session1 = client1.transport.list_table_specs._session - session2 = client2.transport.list_table_specs._session - assert session1 != session2 - session1 = client1.transport.update_table_spec._session - session2 = client2.transport.update_table_spec._session - assert session1 != session2 - session1 = client1.transport.get_column_spec._session - session2 = client2.transport.get_column_spec._session - assert session1 != session2 - session1 = client1.transport.list_column_specs._session - session2 = client2.transport.list_column_specs._session - assert session1 != session2 - session1 = client1.transport.update_column_spec._session - session2 = client2.transport.update_column_spec._session - assert session1 != session2 - session1 = client1.transport.create_model._session - session2 = client2.transport.create_model._session - assert session1 != session2 - session1 = client1.transport.get_model._session - session2 = client2.transport.get_model._session - assert session1 != session2 - session1 = client1.transport.list_models._session - session2 = client2.transport.list_models._session - assert session1 != session2 - session1 = client1.transport.delete_model._session - session2 = client2.transport.delete_model._session - assert session1 != session2 - session1 = client1.transport.deploy_model._session - session2 = client2.transport.deploy_model._session - assert session1 != session2 - session1 = client1.transport.undeploy_model._session - session2 = client2.transport.undeploy_model._session - assert session1 != session2 - session1 = client1.transport.export_model._session - session2 = client2.transport.export_model._session - assert session1 != session2 - session1 = client1.transport.export_evaluated_examples._session - session2 = client2.transport.export_evaluated_examples._session - assert session1 != session2 - session1 = client1.transport.get_model_evaluation._session - session2 = client2.transport.get_model_evaluation._session - assert session1 != session2 - session1 = client1.transport.list_model_evaluations._session - session2 = client2.transport.list_model_evaluations._session - assert session1 != session2 -def test_auto_ml_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.AutoMlGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_auto_ml_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.AutoMlGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) -def test_auto_ml_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport]) -def test_auto_ml_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_auto_ml_grpc_lro_client(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_auto_ml_grpc_lro_async_client(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_annotation_spec_path(): - project = "squid" - location = "clam" - dataset = "whelk" - annotation_spec = "octopus" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - actual = AutoMlClient.annotation_spec_path(project, location, dataset, annotation_spec) - assert expected == actual - - -def test_parse_annotation_spec_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "dataset": "cuttlefish", - "annotation_spec": "mussel", - } - path = AutoMlClient.annotation_spec_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_annotation_spec_path(path) - assert expected == actual - -def test_column_spec_path(): - project = "winkle" - location = "nautilus" - dataset = "scallop" - table_spec = "abalone" - column_spec = "squid" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}".format(project=project, location=location, dataset=dataset, table_spec=table_spec, column_spec=column_spec, ) - actual = AutoMlClient.column_spec_path(project, location, dataset, table_spec, column_spec) - assert expected == actual - - -def test_parse_column_spec_path(): - expected = { - "project": "clam", - "location": "whelk", - "dataset": "octopus", - "table_spec": "oyster", - "column_spec": "nudibranch", - } - path = AutoMlClient.column_spec_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_column_spec_path(path) - assert expected == actual - -def test_dataset_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = AutoMlClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", - } - path = AutoMlClient.dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = AutoMlClient.parse_dataset_path(path) - assert expected == actual - -def test_model_path(): - project = "squid" - location = "clam" - model = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = AutoMlClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model": "nudibranch", - } - path = AutoMlClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_model_path(path) - assert expected == actual - -def test_model_evaluation_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" - model_evaluation = "nautilus" - expected = "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(project=project, location=location, model=model, model_evaluation=model_evaluation, ) - actual = AutoMlClient.model_evaluation_path(project, location, model, model_evaluation) - assert expected == actual - - -def test_parse_model_evaluation_path(): - expected = { - "project": "scallop", - "location": "abalone", - "model": "squid", - "model_evaluation": "clam", - } - path = AutoMlClient.model_evaluation_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_model_evaluation_path(path) - assert expected == actual - -def test_table_spec_path(): - project = "whelk" - location = "octopus" - dataset = "oyster" - table_spec = "nudibranch" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}".format(project=project, location=location, dataset=dataset, table_spec=table_spec, ) - actual = AutoMlClient.table_spec_path(project, location, dataset, table_spec) - assert expected == actual - - -def test_parse_table_spec_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - "dataset": "winkle", - "table_spec": "nautilus", - } - path = AutoMlClient.table_spec_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_table_spec_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "scallop" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = AutoMlClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "abalone", - } - path = AutoMlClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "squid" - expected = "folders/{folder}".format(folder=folder, ) - actual = AutoMlClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "clam", - } - path = AutoMlClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = AutoMlClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "whelk" - expected = "organizations/{organization}".format(organization=organization, ) - actual = AutoMlClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "octopus", - } - path = AutoMlClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "oyster" - expected = "projects/{project}".format(project=project, ) - actual = AutoMlClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nudibranch", - } - path = AutoMlClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "cuttlefish" - location = "mussel" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = AutoMlClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "winkle", - "location": "nautilus", - } - path = AutoMlClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = AutoMlClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_with_default_client_info(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.AutoMlTransport, '_prep_wrapped_messages') as prep: - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.AutoMlTransport, '_prep_wrapped_messages') as prep: - transport_class = AutoMlClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = AutoMlAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - - -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'rest', - 'grpc', - ] - for transport in transports: - client = AutoMlClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() - -@pytest.mark.parametrize("client_class,transport_class", [ - (AutoMlClient, transports.AutoMlGrpcTransport), - (AutoMlAsyncClient, transports.AutoMlGrpcAsyncIOTransport), -]) -def test_api_key_credentials(client_class, transport_class): - with mock.patch.object( - google.auth._default, "get_api_key_credentials", create=True - ) as get_api_key_credentials: - mock_cred = mock.Mock() - get_api_key_credentials.return_value = mock_cred - options = client_options.ClientOptions() - options.api_key = "api_key" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=mock_cred, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_prediction_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_prediction_service.py deleted file mode 100644 index 55cc6d75..00000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/automl_v1beta1/test_prediction_service.py +++ /dev/null @@ -1,2270 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock # pragma: NO COVER -except ImportError: # pragma: NO COVER - import mock - -import grpc -from grpc.experimental import aio -from collections.abc import Iterable -from google.protobuf import json_format -import json -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule -from proto.marshal.rules import wrappers -from requests import Response -from requests import Request, PreparedRequest -from requests.sessions import Session -from google.protobuf import json_format - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.automl_v1beta1.services.prediction_service import PredictionServiceAsyncClient -from google.cloud.automl_v1beta1.services.prediction_service import PredictionServiceClient -from google.cloud.automl_v1beta1.services.prediction_service import transports -from google.cloud.automl_v1beta1.types import annotation_payload -from google.cloud.automl_v1beta1.types import data_items -from google.cloud.automl_v1beta1.types import geometry -from google.cloud.automl_v1beta1.types import io -from google.cloud.automl_v1beta1.types import operations -from google.cloud.automl_v1beta1.types import prediction_service -from google.cloud.automl_v1beta1.types import text_segment -from google.longrunning import operations_pb2 # type: ignore -from google.oauth2 import service_account -from google.protobuf import struct_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert PredictionServiceClient._get_default_mtls_endpoint(None) is None - assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class,transport_name", [ - (PredictionServiceClient, "grpc"), - (PredictionServiceAsyncClient, "grpc_asyncio"), - (PredictionServiceClient, "rest"), -]) -def test_prediction_service_client_from_service_account_info(client_class, transport_name): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info, transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://automl.googleapis.com' - ) - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.PredictionServiceGrpcTransport, "grpc"), - (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), - (transports.PredictionServiceRestTransport, "rest"), -]) -def test_prediction_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class,transport_name", [ - (PredictionServiceClient, "grpc"), - (PredictionServiceAsyncClient, "grpc_asyncio"), - (PredictionServiceClient, "rest"), -]) -def test_prediction_service_client_from_service_account_file(client_class, transport_name): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert 
isinstance(client, client_class) - - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://automl.googleapis.com' - ) - - -def test_prediction_service_client_get_transport_class(): - transport = PredictionServiceClient.get_transport_class() - available_transports = [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceRestTransport, - ] - assert transport in available_transports - - transport = PredictionServiceClient.get_transport_class("grpc") - assert transport == transports.PredictionServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), - (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -def test_prediction_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class(transport=transport_name) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class(transport=transport_name) - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - # Check the case api_endpoint is provided - options = client_options.ClientOptions(api_audience="https://language.googleapis.com") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience="https://language.googleapis.com" - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), - (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", "true"), - (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", "false"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class", [ - PredictionServiceClient, PredictionServiceAsyncClient -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -def test_prediction_service_client_get_mtls_endpoint_and_cert_source(client_class): - mock_client_cert_source = mock.Mock() - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source == mock_client_cert_source - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - mock_client_cert_source = mock.Mock() - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source == mock_client_cert_source - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), - (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest"), -]) -def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), - (PredictionServiceClient, transports.PredictionServiceRestTransport, "rest", None), -]) -def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -def test_prediction_service_client_client_options_from_dict(): - with mock.patch('google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = PredictionServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), -]) -def test_prediction_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # test that the credentials from file are saved and used as the credentials. 
- with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel" - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - file_creds = ga_credentials.AnonymousCredentials() - load_creds.return_value = (file_creds, None) - adc.return_value = (creds, None) - client = client_class(client_options=options, transport=transport_name) - create_channel.assert_called_with( - "automl.googleapis.com:443", - credentials=file_creds, - credentials_file=None, - quota_project_id=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=None, - default_host="automl.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("request_type", [ - prediction_service.PredictRequest, - dict, -]) -def test_predict(request_type, transport: str = 'grpc'): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse( - ) - response = client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - - -def test_predict_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - client.predict() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - -@pytest.mark.asyncio -async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse( - )) - response = await client.predict(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - - -@pytest.mark.asyncio -async def test_predict_async_from_dict(): - await test_predict_async(request_type=dict) - - -def test_predict_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.PredictRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - call.return_value = prediction_service.PredictResponse() - client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_predict_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.PredictRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) - await client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_predict_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.predict( - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].payload - mock_val = data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')) - assert arg == mock_val - arg = args[0].params - mock_val = {'key_value': 'value_value'} - assert arg == mock_val - - -def test_predict_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.predict( - prediction_service.PredictRequest(), - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - -@pytest.mark.asyncio -async def test_predict_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.predict( - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].payload - mock_val = data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')) - assert arg == mock_val - arg = args[0].params - mock_val = {'key_value': 'value_value'} - assert arg == mock_val - -@pytest.mark.asyncio -async def test_predict_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.predict( - prediction_service.PredictRequest(), - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - - -@pytest.mark.parametrize("request_type", [ - prediction_service.BatchPredictRequest, - dict, -]) -def test_batch_predict(request_type, transport: str = 'grpc'): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.batch_predict(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.BatchPredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_batch_predict_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - client.batch_predict() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.BatchPredictRequest() - -@pytest.mark.asyncio -async def test_batch_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.BatchPredictRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.BatchPredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_predict_async_from_dict(): - await test_batch_predict_async(request_type=dict) - - -def test_batch_predict_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.BatchPredictRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_predict_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.BatchPredictRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_batch_predict_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_predict( - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].input_config - mock_val = io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) - assert arg == mock_val - arg = args[0].output_config - mock_val = io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - arg = args[0].params - mock_val = {'key_value': 'value_value'} - assert arg == mock_val - - -def test_batch_predict_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_predict( - prediction_service.BatchPredictRequest(), - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - -@pytest.mark.asyncio -async def test_batch_predict_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.batch_predict( - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].input_config - mock_val = io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])) - assert arg == mock_val - arg = args[0].output_config - mock_val = io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - arg = args[0].params - mock_val = {'key_value': 'value_value'} - assert arg == mock_val - -@pytest.mark.asyncio -async def test_batch_predict_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_predict( - prediction_service.BatchPredictRequest(), - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - - -@pytest.mark.parametrize("request_type", [ - prediction_service.PredictRequest, - dict, -]) -def test_predict_rest(request_type): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = prediction_service.PredictResponse( - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = prediction_service.PredictResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.predict(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, prediction_service.PredictResponse) - - -def test_predict_rest_required_fields(request_type=prediction_service.PredictRequest): - transport_class = transports.PredictionServiceRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).predict._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).predict._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = prediction_service.PredictResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = prediction_service.PredictResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.predict(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_predict_rest_unset_required_fields(): - transport = transports.PredictionServiceRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.predict._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "payload", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_predict_rest_interceptors(null_interceptor): - transport = transports.PredictionServiceRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.PredictionServiceRestInterceptor(), - ) - client = PredictionServiceClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.PredictionServiceRestInterceptor, "post_predict") as post, \ - mock.patch.object(transports.PredictionServiceRestInterceptor, "pre_predict") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = prediction_service.PredictRequest.pb(prediction_service.PredictRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = prediction_service.PredictResponse.to_json(prediction_service.PredictResponse()) - - request = prediction_service.PredictRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = prediction_service.PredictResponse() - - client.predict(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_predict_rest_bad_request(transport: str = 'rest', request_type=prediction_service.PredictRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.predict(request) - - -def test_predict_rest_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = prediction_service.PredictResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = prediction_service.PredictResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.predict(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:predict" % client.transport._host, args[1]) - - -def test_predict_rest_flattened_error(transport: str = 'rest'): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.predict( - prediction_service.PredictRequest(), - name='name_value', - payload=data_items.ExamplePayload(image=data_items.Image(image_bytes=b'image_bytes_blob')), - params={'key_value': 'value_value'}, - ) - - -def test_predict_rest_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - prediction_service.BatchPredictRequest, - dict, -]) -def test_batch_predict_rest(request_type): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.batch_predict(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_batch_predict_rest_required_fields(request_type=prediction_service.BatchPredictRequest): - transport_class = transports.PredictionServiceRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).batch_predict._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).batch_predict._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.batch_predict(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_batch_predict_rest_unset_required_fields(): - transport = transports.PredictionServiceRestTransport(credentials=ga_credentials.AnonymousCredentials) - - unset_fields = transport.batch_predict._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "inputConfig", "outputConfig", "params", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_batch_predict_rest_interceptors(null_interceptor): - transport = transports.PredictionServiceRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.PredictionServiceRestInterceptor(), - ) - client = PredictionServiceClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.PredictionServiceRestInterceptor, "post_batch_predict") as post, \ - mock.patch.object(transports.PredictionServiceRestInterceptor, "pre_batch_predict") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = prediction_service.BatchPredictRequest.pb(prediction_service.BatchPredictRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = prediction_service.BatchPredictRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.batch_predict(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_batch_predict_rest_bad_request(transport: str = 'rest', request_type=prediction_service.BatchPredictRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/locations/sample2/models/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.batch_predict(request) - - -def test_batch_predict_rest_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/locations/sample2/models/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.batch_predict(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v1beta1/{name=projects/*/locations/*/models/*}:batchPredict" % client.transport._host, args[1]) - - -def test_batch_predict_rest_flattened_error(transport: str = 'rest'): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_predict( - prediction_service.BatchPredictRequest(), - name='name_value', - input_config=io.BatchPredictInputConfig(gcs_source=io.GcsSource(input_uris=['input_uris_value'])), - output_config=io.BatchPredictOutputConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - params={'key_value': 'value_value'}, - ) - - -def test_batch_predict_rest_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest' - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide an api_key and a transport instance. 
- transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options=options, - transport=transport, - ) - - # It is an error to provide an api_key and a credential. - options = mock.Mock() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options=options, - credentials=ga_credentials.AnonymousCredentials() - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = PredictionServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.PredictionServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - transports.PredictionServiceRestTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "rest", -]) -def test_transport_kind(transport_name): - transport = PredictionServiceClient.get_transport_class(transport_name)( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert transport.kind == transport_name - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.PredictionServiceGrpcTransport, - ) - -def test_prediction_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_prediction_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'predict', - 'batch_predict', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - # Catch all for all remaining methods and properties - remainder = [ - 'kind', - ] - for r in remainder: - with pytest.raises(NotImplementedError): - getattr(transport, r)() - - -def test_prediction_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_prediction_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.automl_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport() - adc.assert_called_once() - - -def test_prediction_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PredictionServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - ], -) -def test_prediction_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - transports.PredictionServiceRestTransport, - ], -) -def test_prediction_service_transport_auth_gdch_credentials(transport_class): - host = 'https://language.com' - api_audience_tests = [None, 'https://language2.com'] - api_audience_expect = [host, 'https://language2.com'] - for t, e in zip(api_audience_tests, api_audience_expect): - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - gdch_mock = mock.MagicMock() - type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) - adc.return_value = (gdch_mock, None) - transport_class(host=host, api_audience=t) - gdch_mock.with_gdch_audience.assert_called_once_with( - e - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.PredictionServiceGrpcTransport, grpc_helpers), - (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "automl.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="automl.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - -def test_prediction_service_http_transport_client_cert_source_for_mtls(): - cred = ga_credentials.AnonymousCredentials() - with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: - transports.PredictionServiceRestTransport ( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) - - -def test_prediction_service_rest_lro_client(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='rest', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_prediction_service_host_no_port(transport_name): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com'), - transport=transport_name, - ) - assert client.transport._host == ( - 'automl.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://automl.googleapis.com' - ) - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_prediction_service_host_with_port(transport_name): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='automl.googleapis.com:8000'), - transport=transport_name, - ) - assert client.transport._host == ( - 'automl.googleapis.com:8000' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://automl.googleapis.com:8000' - ) - -@pytest.mark.parametrize("transport_name", [ - "rest", -]) -def test_prediction_service_client_transport_session_collision(transport_name): - creds1 = ga_credentials.AnonymousCredentials() - creds2 = ga_credentials.AnonymousCredentials() - client1 = PredictionServiceClient( - credentials=creds1, - transport=transport_name, - ) - client2 = PredictionServiceClient( - credentials=creds2, - transport=transport_name, - ) - session1 = client1.transport.predict._session - session2 = client2.transport.predict._session - assert session1 != session2 - session1 = client1.transport.batch_predict._session - session2 = client2.transport.batch_predict._session - assert session1 != session2 -def test_prediction_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.PredictionServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_prediction_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PredictionServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_prediction_service_grpc_lro_client(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_prediction_service_grpc_lro_async_client(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_model_path(): - project = "squid" - location = "clam" - model = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = PredictionServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model": "nudibranch", - } - path = PredictionServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_model_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = PredictionServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = PredictionServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = PredictionServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = PredictionServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = PredictionServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = PredictionServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = PredictionServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = PredictionServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = PredictionServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = PredictionServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_with_default_client_info(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = PredictionServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - - -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'rest', - 'grpc', - ] - for transport in transports: - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() - -@pytest.mark.parametrize("client_class,transport_class", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport), -]) -def test_api_key_credentials(client_class, transport_class): - with mock.patch.object( - google.auth._default, "get_api_key_credentials", create=True - ) as get_api_key_credentials: - mock_cred = mock.Mock() - get_api_key_credentials.return_value = mock_cred - options = client_options.ClientOptions() - options.api_key = "api_key" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=mock_cred, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) diff --git a/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json index 6338d6e6..df593972 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-automl", - "version": "2.11.3" + "version": "0.1.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json index 0f0f1bdb..70c04874 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-automl", - "version": "2.11.3" + "version": "0.1.0" }, "snippets": [ { diff --git a/tests/unit/gapic/automl_v1/test_auto_ml.py b/tests/unit/gapic/automl_v1/test_auto_ml.py index 2835d28e..1e2239da 100644 --- a/tests/unit/gapic/automl_v1/test_auto_ml.py +++ b/tests/unit/gapic/automl_v1/test_auto_ml.py @@ -5780,7 +5780,8 @@ def get_message_fields(field): if is_field_type_proto_plus_type: message_fields = field.message.meta.fields.values() - else: + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER message_fields = field.message.DESCRIPTOR.fields return message_fields @@ -5793,7 +5794,8 @@ def get_message_fields(field): subfields_not_in_runtime = [] # For each item in the sample request, create a list of sub fields which are not present at runtime - for field, value in request_init["dataset"].items(): + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["dataset"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -5816,7 +5818,8 @@ def get_message_fields(field): ) # Remove fields from the sample request which are not present in the runtime version of the dependency - for subfield_to_delete in subfields_not_in_runtime: + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER field = 
subfield_to_delete.get("field") field_repeated = subfield_to_delete.get("is_repeated") subfield = subfield_to_delete.get("subfield") @@ -6745,7 +6748,8 @@ def get_message_fields(field): if is_field_type_proto_plus_type: message_fields = field.message.meta.fields.values() - else: + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER message_fields = field.message.DESCRIPTOR.fields return message_fields @@ -6758,7 +6762,8 @@ def get_message_fields(field): subfields_not_in_runtime = [] # For each item in the sample request, create a list of sub fields which are not present at runtime - for field, value in request_init["dataset"].items(): + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["dataset"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -6781,7 +6786,8 @@ def get_message_fields(field): ) # Remove fields from the sample request which are not present in the runtime version of the dependency - for subfield_to_delete in subfields_not_in_runtime: + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER field = subfield_to_delete.get("field") field_repeated = subfield_to_delete.get("is_repeated") subfield = subfield_to_delete.get("subfield") @@ -8218,7 +8224,8 @@ def get_message_fields(field): if is_field_type_proto_plus_type: message_fields = field.message.meta.fields.values() - else: + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER message_fields = field.message.DESCRIPTOR.fields return message_fields @@ -8231,7 +8238,8 @@ def get_message_fields(field): subfields_not_in_runtime = [] # For each item in the sample request, create a list of sub fields which are not present at runtime - for field, value in request_init["model"].items(): + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["model"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -8254,7 +8262,8 @@ def get_message_fields(field): ) # Remove fields from the sample request which are not present in the runtime version of the dependency - for subfield_to_delete in subfields_not_in_runtime: + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER field = subfield_to_delete.get("field") field_repeated = subfield_to_delete.get("is_repeated") subfield = subfield_to_delete.get("subfield") @@ -9456,7 +9465,8 @@ def get_message_fields(field): if is_field_type_proto_plus_type: message_fields = field.message.meta.fields.values() - else: + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER message_fields = field.message.DESCRIPTOR.fields return message_fields @@ -9469,7 +9479,8 @@ def get_message_fields(field): subfields_not_in_runtime = [] # For each item in the sample request, create a list of sub fields which are not present at runtime - for field, value in request_init["model"].items(): + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["model"].items(): # pragma: NO COVER result = None is_repeated = False # 
For repeated fields @@ -9492,7 +9503,8 @@ def get_message_fields(field): ) # Remove fields from the sample request which are not present in the runtime version of the dependency - for subfield_to_delete in subfields_not_in_runtime: + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER field = subfield_to_delete.get("field") field_repeated = subfield_to_delete.get("is_repeated") subfield = subfield_to_delete.get("subfield") diff --git a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py index e4e6b0ea..61b87bb2 100644 --- a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py +++ b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py @@ -7662,7 +7662,8 @@ def get_message_fields(field): if is_field_type_proto_plus_type: message_fields = field.message.meta.fields.values() - else: + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER message_fields = field.message.DESCRIPTOR.fields return message_fields @@ -7675,7 +7676,8 @@ def get_message_fields(field): subfields_not_in_runtime = [] # For each item in the sample request, create a list of sub fields which are not present at runtime - for field, value in request_init["dataset"].items(): + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["dataset"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -7698,7 +7700,8 @@ def get_message_fields(field): ) # Remove fields from the sample request which are not present in the runtime version of the dependency - for subfield_to_delete in subfields_not_in_runtime: + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER field = subfield_to_delete.get("field") field_repeated = subfield_to_delete.get("is_repeated") subfield = subfield_to_delete.get("subfield") @@ -8653,7 +8656,8 @@ def get_message_fields(field): if is_field_type_proto_plus_type: message_fields = field.message.meta.fields.values() - else: + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER message_fields = field.message.DESCRIPTOR.fields return message_fields @@ -8666,7 +8670,8 @@ def get_message_fields(field): subfields_not_in_runtime = [] # For each item in the sample request, create a list of sub fields which are not present at runtime - for field, value in request_init["dataset"].items(): + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["dataset"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -8689,7 +8694,8 @@ def get_message_fields(field): ) # Remove fields from the sample request which are not present in the runtime version of the dependency - for subfield_to_delete in subfields_not_in_runtime: + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER field = subfield_to_delete.get("field") field_repeated = subfield_to_delete.get("is_repeated") subfield = subfield_to_delete.get("subfield") @@ -10729,7 +10735,8 @@ def get_message_fields(field): if is_field_type_proto_plus_type: 
message_fields = field.message.meta.fields.values() - else: + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER message_fields = field.message.DESCRIPTOR.fields return message_fields @@ -10742,7 +10749,8 @@ def get_message_fields(field): subfields_not_in_runtime = [] # For each item in the sample request, create a list of sub fields which are not present at runtime - for field, value in request_init["table_spec"].items(): + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["table_spec"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -10765,7 +10773,8 @@ def get_message_fields(field): ) # Remove fields from the sample request which are not present in the runtime version of the dependency - for subfield_to_delete in subfields_not_in_runtime: + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER field = subfield_to_delete.get("field") field_repeated = subfield_to_delete.get("is_repeated") subfield = subfield_to_delete.get("subfield") @@ -11746,7 +11755,8 @@ def get_message_fields(field): if is_field_type_proto_plus_type: message_fields = field.message.meta.fields.values() - else: + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER message_fields = field.message.DESCRIPTOR.fields return message_fields @@ -11759,7 +11769,8 @@ def get_message_fields(field): subfields_not_in_runtime = [] # For each item in the sample request, create a list of sub fields which are not present at runtime - for field, value in request_init["column_spec"].items(): + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["column_spec"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -11782,7 +11793,8 @@ def get_message_fields(field): ) # Remove fields from the sample request which are not present in the runtime version of the dependency - for subfield_to_delete in subfields_not_in_runtime: + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER field = subfield_to_delete.get("field") field_repeated = subfield_to_delete.get("is_repeated") subfield = subfield_to_delete.get("subfield") @@ -12180,7 +12192,8 @@ def get_message_fields(field): if is_field_type_proto_plus_type: message_fields = field.message.meta.fields.values() - else: + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER message_fields = field.message.DESCRIPTOR.fields return message_fields @@ -12193,7 +12206,8 @@ def get_message_fields(field): subfields_not_in_runtime = [] # For each item in the sample request, create a list of sub fields which are not present at runtime - for field, value in request_init["model"].items(): + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["model"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -12216,7 +12230,8 @@ def get_message_fields(field): ) # Remove fields from the sample request which are not present in the runtime version of the dependency - for 
subfield_to_delete in subfields_not_in_runtime: + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER field = subfield_to_delete.get("field") field_repeated = subfield_to_delete.get("is_repeated") subfield = subfield_to_delete.get("subfield")
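
The `# pragma: NO COVER` markers added in the hunks above exclude branches whose execution depends on which message flavors (proto-plus vs. plain *_pb2) happen to appear in a given API surface, so an unexercised branch does not count against the coverage threshold. A minimal, hypothetical sketch of the idea, assuming a coverage configuration that lists `pragma: NO COVER` under exclude_lines (as the generated .coveragerc files typically do); the helper below is a simplified stand-in for the tests' get_message_fields, not the generated code itself:

    def message_field_names(message_cls):
        # Generated proto-plus classes expose their fields via .meta.fields,
        # while plain *_pb2 classes expose them via .DESCRIPTOR.fields.
        # Depending on the API, one of these branches may never run, so it is
        # marked with the pragma to keep it out of coverage accounting.
        if hasattr(message_cls, "meta"):  # simplified stand-in for the real type check
            return list(message_cls.meta.fields)
        else:  # pragma: NO COVER
            return [f.name for f in message_cls.DESCRIPTOR.fields]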
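
The removed batch_predict REST tests earlier in this diff all follow the same mocking recipe: requests.Session.request is patched to return a Response whose body is the JSON encoding of the expected long-running Operation. A trimmed-down, hypothetical sketch of that recipe (the fake_operation_response helper is illustrative only, not part of the generated tests):

    from unittest import mock
    from requests import Response, Session
    from google.longrunning import operations_pb2
    from google.protobuf import json_format

    def fake_operation_response(name="operations/spam"):
        # Build a requests.Response carrying a JSON-serialized Operation,
        # which is the payload shape the REST transport deserializes.
        return_value = operations_pb2.Operation(name=name)
        response = Response()
        response.status_code = 200
        response._content = json_format.MessageToJson(return_value).encode("UTF-8")
        return response

    # Typical use inside a test (client construction omitted):
    # with mock.patch.object(Session, "request", return_value=fake_operation_response()):
    #     lro = client.batch_predict(request)
    #     assert lro.operation.name == "operations/spam"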