diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py index 992e9bafb3..72a0bb4dbf 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py @@ -235,15 +235,15 @@ class AutoTransformation(proto.Message): class NumericTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The value converted to float32. - - The z_score of the value. - - log(value+1) when the value is greater than or equal to 0. - Otherwise, this transformation is not applied and the value is - considered a missing value. - - z_score of log(value+1) when the value is greater than or equal - to 0. Otherwise, this transformation is not applied and the value - is considered a missing value. - - A boolean value that indicates whether the value is valid. + - The value converted to float32. + - The z_score of the value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + - z_score of log(value+1) when the value is greater than or equal to + 0. Otherwise, this transformation is not applied and the value is + considered a missing value. + - A boolean value that indicates whether the value is valid. Attributes: column_name (str): @@ -268,13 +268,13 @@ class NumericTransformation(proto.Message): class CategoricalTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The categorical string as is--no change to case, punctuation, - spelling, tense, and so on. - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. - - Categories that appear less than 5 times in the training dataset - are treated as the "unknown" category. The "unknown" category - gets its own special lookup index and resulting embedding. + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category gets + its own special lookup index and resulting embedding. Attributes: column_name (str): @@ -289,13 +289,13 @@ class CategoricalTransformation(proto.Message): class TimestampTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - Apply the transformation functions for Numerical columns. - - Determine the year, month, day,and weekday. Treat each value from - the - - timestamp as a Categorical column. - - Invalid numerical values (for example, values that fall outside - of a typical timestamp range, or are extreme values) receive no - special treatment and are not removed. + - Apply the transformation functions for Numerical columns. + - Determine the year, month, day,and weekday. Treat each value from + the + - timestamp as a Categorical column. + - Invalid numerical values (for example, values that fall outside of + a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. 
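[Editor's note] The reflowed docstrings above describe the per-column transformation types (auto, numeric, categorical, timestamp, text) that the AutoML Tables pipeline can apply. As a rough illustration of where these messages surface in practice, here is a minimal sketch using the high-level SDK's dict form of ``column_transformations``; the column names are hypothetical and the exact keyword arguments should be checked against the installed ``google-cloud-aiplatform`` version::

    from google.cloud import aiplatform

    # Each dict key selects one transformation type from the messages above;
    # the nested dict carries that message's fields (column_name, etc.).
    job = aiplatform.AutoMLTabularTrainingJob(
        display_name="tables-transformations-sketch",
        optimization_prediction_type="regression",
        column_transformations=[
            {"numeric": {"column_name": "age"}},
            {"categorical": {"column_name": "occupation"}},
            {"timestamp": {"column_name": "signup_time", "time_format": "unix-seconds"}},
            {"text": {"column_name": "bio"}},
        ],
    )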
Attributes: column_name (str): @@ -304,15 +304,15 @@ class TimestampTransformation(proto.Message): The format in which that time field is expressed. The time_format must either be one of: - - ``unix-seconds`` - - ``unix-milliseconds`` - - ``unix-microseconds`` - - ``unix-nanoseconds`` (for respectively number of seconds, - milliseconds, microseconds and nanoseconds since start of - the Unix epoch); or be written in ``strftime`` syntax. If - time_format is not set, then the default format is RFC - 3339 ``date-time`` format, where ``time-offset`` = - ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) + - ``unix-seconds`` + - ``unix-milliseconds`` + - ``unix-microseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. If + time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = ``"Z"`` + (e.g. 1985-04-12T23:20:50.52Z) invalid_values_allowed (bool): If invalid values is allowed, the training pipeline will create a boolean feature that @@ -337,15 +337,14 @@ class TimestampTransformation(proto.Message): class TextTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The text as is--no change to case, punctuation, spelling, tense, - and so on. - - Tokenize text to words. Convert each words to a dictionary lookup - index and generate an embedding for each index. Combine the - embedding of all elements into a single embedding using the mean. - - Tokenization is based on unicode script boundaries. - - Missing values get their own lookup index and resulting - embedding. - - Stop-words receive no special treatment and are not removed. + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + - Tokenize text to words. Convert each words to a dictionary lookup + index and generate an embedding for each index. Combine the + embedding of all elements into a single embedding using the mean. + - Tokenization is based on unicode script boundaries. + - Missing values get their own lookup index and resulting embedding. + - Stop-words receive no special treatment and are not removed. Attributes: column_name (str): @@ -361,9 +360,9 @@ class NumericArrayTransformation(proto.Message): r"""Treats the column as numerical array and performs following transformation functions. - - All transformations for Numerical types applied to the average of - the all elements. - - The average of empty arrays is treated as zero. + - All transformations for Numerical types applied to the average of + the all elements. + - The average of empty arrays is treated as zero. Attributes: column_name (str): @@ -389,11 +388,11 @@ class CategoricalArrayTransformation(proto.Message): r"""Treats the column as categorical array and performs following transformation functions. - - For each element in the array, convert the category name to a - dictionary lookup index and generate an embedding for each index. - Combine the embedding of all elements into a single embedding - using the mean. - - Empty arrays treated as an embedding of zeroes. + - For each element in the array, convert the category name to a + dictionary lookup index and generate an embedding for each index. + Combine the embedding of all elements into a single embedding + using the mean. + - Empty arrays treated as an embedding of zeroes. 
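[Editor's note] For the ``time_format`` values covered above, this self-contained snippet shows what the unix-epoch variants, a ``strftime`` pattern, and the RFC 3339 default (the docstring's own 1985-04-12T23:20:50.52Z example) look like for one instant. This is plain standard-library Python, not an aiplatform API::

    from datetime import datetime, timezone

    ts = datetime(1985, 4, 12, 23, 20, 50, 520000, tzinfo=timezone.utc)

    # "unix-seconds"; the milli/micro/nano variants scale the same value.
    unix_seconds = int(ts.timestamp())                        # 482196050
    unix_milliseconds = unix_seconds * 1000 + ts.microsecond // 1000

    # A strftime-syntax pattern, also accepted by time_format.
    strftime_form = ts.strftime("%Y-%m-%dT%H:%M:%S")

    # The default when time_format is unset: RFC 3339 date-time with
    # time-offset "Z".
    rfc3339 = ts.isoformat().replace("+00:00", "Z")  # 1985-04-12T23:20:50.520000Z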
Attributes: column_name (str): @@ -409,10 +408,10 @@ class TextArrayTransformation(proto.Message): r"""Treats the column as text array and performs following transformation functions. - - Concatenate all text values in the array into a single text value - using a space (" ") as a delimiter, and then treat the result as - a single text value. Apply the transformations for Text columns. - - Empty arrays treated as an empty text. + - Concatenate all text values in the array into a single text value + using a space (" ") as a delimiter, and then treat the result as a + single text value. Apply the transformations for Text columns. + - Empty arrays treated as an empty text. Attributes: column_name (str): diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py index 49be440eb0..f0de3bb9be 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py @@ -39,7 +39,7 @@ class ExportEvaluatedDataItemsConfig(proto.Message): If not specified, then results are exported to the following auto-created BigQuery table: - :export_evaluated_examples__.evaluated_examples + :export_evaluated_examples\_\_.evaluated_examples override_existing_table (bool): If true and an export destination is specified, then the contents of the destination diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py index 01150fa6fa..5446fd2f0d 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py @@ -235,15 +235,15 @@ class AutoTransformation(proto.Message): class NumericTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The value converted to float32. - - The z_score of the value. - - log(value+1) when the value is greater than or equal to 0. - Otherwise, this transformation is not applied and the value is - considered a missing value. - - z_score of log(value+1) when the value is greater than or equal - to 0. Otherwise, this transformation is not applied and the value - is considered a missing value. - - A boolean value that indicates whether the value is valid. + - The value converted to float32. + - The z_score of the value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + - z_score of log(value+1) when the value is greater than or equal to + 0. Otherwise, this transformation is not applied and the value is + considered a missing value. + - A boolean value that indicates whether the value is valid. Attributes: column_name (str): @@ -268,13 +268,13 @@ class NumericTransformation(proto.Message): class CategoricalTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The categorical string as is--no change to case, punctuation, - spelling, tense, and so on. - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. 
- - Categories that appear less than 5 times in the training dataset - are treated as the "unknown" category. The "unknown" category - gets its own special lookup index and resulting embedding. + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category gets + its own special lookup index and resulting embedding. Attributes: column_name (str): @@ -289,13 +289,13 @@ class CategoricalTransformation(proto.Message): class TimestampTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - Apply the transformation functions for Numerical columns. - - Determine the year, month, day,and weekday. Treat each value from - the - - timestamp as a Categorical column. - - Invalid numerical values (for example, values that fall outside - of a typical timestamp range, or are extreme values) receive no - special treatment and are not removed. + - Apply the transformation functions for Numerical columns. + - Determine the year, month, day,and weekday. Treat each value from + the + - timestamp as a Categorical column. + - Invalid numerical values (for example, values that fall outside of + a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. Attributes: column_name (str): @@ -304,15 +304,15 @@ class TimestampTransformation(proto.Message): The format in which that time field is expressed. The time_format must either be one of: - - ``unix-seconds`` - - ``unix-milliseconds`` - - ``unix-microseconds`` - - ``unix-nanoseconds`` (for respectively number of seconds, - milliseconds, microseconds and nanoseconds since start of - the Unix epoch); or be written in ``strftime`` syntax. If - time_format is not set, then the default format is RFC - 3339 ``date-time`` format, where ``time-offset`` = - ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) + - ``unix-seconds`` + - ``unix-milliseconds`` + - ``unix-microseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. If + time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = ``"Z"`` + (e.g. 1985-04-12T23:20:50.52Z) invalid_values_allowed (bool): If invalid values is allowed, the training pipeline will create a boolean feature that @@ -337,15 +337,14 @@ class TimestampTransformation(proto.Message): class TextTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The text as is--no change to case, punctuation, spelling, tense, - and so on. - - Tokenize text to words. Convert each words to a dictionary lookup - index and generate an embedding for each index. Combine the - embedding of all elements into a single embedding using the mean. - - Tokenization is based on unicode script boundaries. - - Missing values get their own lookup index and resulting - embedding. - - Stop-words receive no special treatment and are not removed. + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + - Tokenize text to words. Convert each words to a dictionary lookup + index and generate an embedding for each index. 
Combine the + embedding of all elements into a single embedding using the mean. + - Tokenization is based on unicode script boundaries. + - Missing values get their own lookup index and resulting embedding. + - Stop-words receive no special treatment and are not removed. Attributes: column_name (str): @@ -361,9 +360,9 @@ class NumericArrayTransformation(proto.Message): r"""Treats the column as numerical array and performs following transformation functions. - - All transformations for Numerical types applied to the average of - the all elements. - - The average of empty arrays is treated as zero. + - All transformations for Numerical types applied to the average of + the all elements. + - The average of empty arrays is treated as zero. Attributes: column_name (str): @@ -389,11 +388,11 @@ class CategoricalArrayTransformation(proto.Message): r"""Treats the column as categorical array and performs following transformation functions. - - For each element in the array, convert the category name to a - dictionary lookup index and generate an embedding for each index. - Combine the embedding of all elements into a single embedding - using the mean. - - Empty arrays treated as an embedding of zeroes. + - For each element in the array, convert the category name to a + dictionary lookup index and generate an embedding for each index. + Combine the embedding of all elements into a single embedding + using the mean. + - Empty arrays treated as an embedding of zeroes. Attributes: column_name (str): @@ -409,10 +408,10 @@ class TextArrayTransformation(proto.Message): r"""Treats the column as text array and performs following transformation functions. - - Concatenate all text values in the array into a single text value - using a space (" ") as a delimiter, and then treat the result as - a single text value. Apply the transformations for Text columns. - - Empty arrays treated as an empty text. + - Concatenate all text values in the array into a single text value + using a space (" ") as a delimiter, and then treat the result as a + single text value. Apply the transformations for Text columns. + - Empty arrays treated as an empty text. Attributes: column_name (str): diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py index faa0d9f1e4..e7ce5d8db3 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py @@ -84,23 +84,23 @@ class AutoMlForecastingInputs(proto.Message): The supported optimization objectives: - - "minimize-rmse" (default) - Minimize root-mean-squared - error (RMSE). + - "minimize-rmse" (default) - Minimize root-mean-squared + error (RMSE). - - "minimize-mae" - Minimize mean-absolute error (MAE). + - "minimize-mae" - Minimize mean-absolute error (MAE). - - "minimize-rmsle" - Minimize root-mean-squared log error - (RMSLE). + - "minimize-rmsle" - Minimize root-mean-squared log error + (RMSLE). - - "minimize-rmspe" - Minimize root-mean-squared percentage - error (RMSPE). + - "minimize-rmspe" - Minimize root-mean-squared percentage + error (RMSPE). - - "minimize-wape-mae" - Minimize the combination of - weighted absolute percentage error (WAPE) and - mean-absolute-error (MAE). 
+ - "minimize-wape-mae" - Minimize the combination of weighted + absolute percentage error (WAPE) and mean-absolute-error + (MAE). - - "minimize-quantile-loss" - Minimize the quantile loss at - the quantiles defined in ``quantiles``. + - "minimize-quantile-loss" - Minimize the quantile loss at + the quantiles defined in ``quantiles``. train_budget_milli_node_hours (int): Required. The train budget of creating this model, expressed in milli node hours i.e. 1,000 @@ -175,11 +175,11 @@ class AutoMlForecastingInputs(proto.Message): Validation options for the data validation component. The available options are: - - "fail-pipeline" - default, will validate against the - validation and fail the pipeline if it fails. + - "fail-pipeline" - default, will validate against the + validation and fail the pipeline if it fails. - - "ignore-validation" - ignore the results of the - validation and continue + - "ignore-validation" - ignore the results of the validation + and continue additional_experiments (MutableSequence[str]): Additional experiment flags for the time series forcasting training. @@ -230,19 +230,19 @@ class AutoTransformation(proto.Message): class NumericTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The value converted to float32. + - The value converted to float32. - - The z_score of the value. + - The z_score of the value. - - log(value+1) when the value is greater than or equal to 0. - Otherwise, this transformation is not applied and the value is - considered a missing value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. - - z_score of log(value+1) when the value is greater than or equal - to 0. Otherwise, this transformation is not applied and the value - is considered a missing value. + - z_score of log(value+1) when the value is greater than or equal to + 0. Otherwise, this transformation is not applied and the value is + considered a missing value. - - A boolean value that indicates whether the value is valid. + - A boolean value that indicates whether the value is valid. Attributes: column_name (str): @@ -257,15 +257,15 @@ class NumericTransformation(proto.Message): class CategoricalTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The categorical string as is--no change to case, punctuation, - spelling, tense, and so on. + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. - - Categories that appear less than 5 times in the training dataset - are treated as the "unknown" category. The "unknown" category - gets its own special lookup index and resulting embedding. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category gets + its own special lookup index and resulting embedding. Attributes: column_name (str): @@ -280,14 +280,14 @@ class CategoricalTransformation(proto.Message): class TimestampTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - Apply the transformation functions for Numerical columns. + - Apply the transformation functions for Numerical columns. 
- - Determine the year, month, day,and weekday. Treat each value from - the timestamp as a Categorical column. + - Determine the year, month, day,and weekday. Treat each value from + the timestamp as a Categorical column. - - Invalid numerical values (for example, values that fall outside - of a typical timestamp range, or are extreme values) receive no - special treatment and are not removed. + - Invalid numerical values (for example, values that fall outside of + a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. Attributes: column_name (str): @@ -296,13 +296,13 @@ class TimestampTransformation(proto.Message): The format in which that time field is expressed. The time_format must either be one of: - - ``unix-seconds`` + - ``unix-seconds`` - - ``unix-milliseconds`` + - ``unix-milliseconds`` - - ``unix-microseconds`` + - ``unix-microseconds`` - - ``unix-nanoseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, milliseconds, microseconds and nanoseconds since start of the Unix epoch); @@ -326,11 +326,11 @@ class TimestampTransformation(proto.Message): class TextTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The text as is--no change to case, punctuation, spelling, tense, - and so on. + - The text as is--no change to case, punctuation, spelling, tense, + and so on. - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. Attributes: column_name (str): @@ -387,17 +387,17 @@ class Granularity(proto.Message): The time granularity unit of this time period. The supported units are: - - "minute" + - "minute" - - "hour" + - "hour" - - "day" + - "day" - - "week" + - "week" - - "month" + - "month" - - "year". + - "year". quantity (int): The number of granularity_units between data points in the training data. 
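[Editor's note] The forecasting hunks above enumerate the supported ``optimization_objective`` strings. For reference, the textbook formulas behind those names can be written out as below; this illustrates the metrics only and says nothing about the service's internal loss implementation::

    import numpy as np

    y = np.array([10.0, 20.0, 30.0])  # actuals (toy data)
    f = np.array([12.0, 18.0, 33.0])  # forecasts

    rmse = np.sqrt(np.mean((y - f) ** 2))                        # "minimize-rmse" (default)
    mae = np.mean(np.abs(y - f))                                 # "minimize-mae"
    rmsle = np.sqrt(np.mean((np.log1p(f) - np.log1p(y)) ** 2))   # "minimize-rmsle"
    rmspe = np.sqrt(np.mean(((y - f) / y) ** 2))                 # "minimize-rmspe"
    wape = np.abs(y - f).sum() / np.abs(y).sum()                 # WAPE term of "minimize-wape-mae"

    def quantile_loss(y, f, q):
        # Pinball loss at quantile q; "minimize-quantile-loss" averages
        # this over the quantiles configured in ``quantiles``.
        e = y - f
        return np.mean(np.maximum(q * e, (q - 1) * e))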
If ``granularity_unit`` is ``minute``, can be diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py index 8c28ebbf22..3395793976 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py @@ -39,7 +39,7 @@ class ExportEvaluatedDataItemsConfig(proto.Message): If not specified, then results are exported to the following auto-created BigQuery table: - :export_evaluated_examples__.evaluated_examples + :export_evaluated_examples\_\_.evaluated_examples override_existing_table (bool): If true and an export destination is specified, then the contents of the destination diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/client.py b/google/cloud/aiplatform_v1/services/data_foundry_service/client.py index 1e43dcf4d6..ac57cfca68 100644 --- a/google/cloud/aiplatform_v1/services/data_foundry_service/client.py +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/client.py @@ -602,11 +602,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = DataFoundryServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + DataFoundryServiceClient._read_environment_variables() + ) self._client_cert_source = DataFoundryServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_asyncio.py index 064d809bf6..463a35b85b 100644 --- a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_asyncio.py @@ -675,11 +675,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_generate_synthetic_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_generate_synthetic_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_generate_synthetic_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py index 539ca531b3..f321aca6be 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -55,9 +55,7 @@ from google.cloud.aiplatform_v1.types import dataset as gca_dataset from google.cloud.aiplatform_v1.types import dataset_service from google.cloud.aiplatform_v1.types import dataset_version -from google.cloud.aiplatform_v1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version from google.cloud.aiplatform_v1.types import encryption_spec from 
google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import saved_query @@ -640,9 +638,9 @@ async def sample_update_dataset(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` - - ``description`` - - ``labels`` + - ``display_name`` + - ``description`` + - ``labels`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1459,7 +1457,7 @@ async def sample_update_dataset_version(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` + - ``display_name`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py index 28ce3a76dc..4ceb31d291 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -71,9 +71,7 @@ from google.cloud.aiplatform_v1.types import dataset as gca_dataset from google.cloud.aiplatform_v1.types import dataset_service from google.cloud.aiplatform_v1.types import dataset_version -from google.cloud.aiplatform_v1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import saved_query @@ -761,11 +759,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = DatasetServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + DatasetServiceClient._read_environment_variables() + ) self._client_cert_source = DatasetServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -1205,9 +1201,9 @@ def sample_update_dataset(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` - - ``description`` - - ``labels`` + - ``display_name`` + - ``description`` + - ``labels`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2006,7 +2002,7 @@ def sample_update_dataset_version(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
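[Editor's note] The ``update_dataset`` docstrings in these hunks restrict the ``FieldMask`` to the listed updatable fields. A minimal sketch of such a call, with hypothetical project, location, and dataset ID::

    from google.cloud import aiplatform_v1
    from google.protobuf import field_mask_pb2

    client = aiplatform_v1.DatasetServiceClient()
    dataset = aiplatform_v1.Dataset(
        name="projects/my-project/locations/us-central1/datasets/123",
        display_name="renamed-dataset",
    )
    # Only paths named in the mask are written; per the docstring they must
    # come from: display_name, description, labels.
    updated = client.update_dataset(
        dataset=dataset,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )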
Updatable fields: - - ``display_name`` + - ``display_name`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py index 7bba498257..52052973e1 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py @@ -33,9 +33,7 @@ from google.cloud.aiplatform_v1.types import dataset as gca_dataset from google.cloud.aiplatform_v1.types import dataset_service from google.cloud.aiplatform_v1.types import dataset_version -from google.cloud.aiplatform_v1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py index 5090565b00..4e22a22dc9 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py @@ -36,9 +36,7 @@ from google.cloud.aiplatform_v1.types import dataset as gca_dataset from google.cloud.aiplatform_v1.types import dataset_service from google.cloud.aiplatform_v1.types import dataset_version -from google.cloud.aiplatform_v1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py index 4146c0d9a0..a31a290d0b 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py @@ -39,9 +39,7 @@ from google.cloud.aiplatform_v1.types import dataset as gca_dataset from google.cloud.aiplatform_v1.types import dataset_service from google.cloud.aiplatform_v1.types import dataset_version -from google.cloud.aiplatform_v1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py index c96dd29a92..6dae00e629 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py @@ -42,9 +42,7 @@ from google.cloud.aiplatform_v1.types import dataset as gca_dataset from google.cloud.aiplatform_v1.types import dataset_service from google.cloud.aiplatform_v1.types import dataset_version -from google.cloud.aiplatform_v1.types import ( - dataset_version as 
gca_dataset_version, -) +from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py index 8ab79ff345..ddc755b15b 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py @@ -55,9 +55,7 @@ from google.cloud.aiplatform_v1.types import dataset as gca_dataset from google.cloud.aiplatform_v1.types import dataset_service from google.cloud.aiplatform_v1.types import dataset_version -from google.cloud.aiplatform_v1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version from google.longrunning import operations_pb2 # type: ignore @@ -4214,11 +4212,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_restore_dataset_version(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_restore_dataset_version_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_restore_dataset_version_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py index a1cd4c487f..f04b69be2a 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py @@ -32,9 +32,7 @@ from google.cloud.aiplatform_v1.types import dataset as gca_dataset from google.cloud.aiplatform_v1.types import dataset_service from google.cloud.aiplatform_v1.types import dataset_version -from google.cloud.aiplatform_v1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py index 28a92f5415..c8d84733bd 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py @@ -47,9 +47,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import ( - pagers, -) +from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import pagers from google.cloud.aiplatform_v1.types import deployment_resource_pool from google.cloud.aiplatform_v1.types import ( deployment_resource_pool as gca_deployment_resource_pool, diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py index 4ac6c47083..02d6f4d90c 100644 --- 
a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py @@ -63,9 +63,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import ( - pagers, -) +from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import pagers from google.cloud.aiplatform_v1.types import deployment_resource_pool from google.cloud.aiplatform_v1.types import ( deployment_resource_pool as gca_deployment_resource_pool, @@ -712,11 +710,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = DeploymentResourcePoolServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + DeploymentResourcePoolServiceClient._read_environment_variables() + ) self._client_cert_source = ( DeploymentResourcePoolServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py index d9e341bbd6..19d8463226 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py @@ -2864,11 +2864,10 @@ def __call__( resp = self._interceptor.post_create_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_create_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_create_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3020,11 +3019,10 @@ def __call__( resp = self._interceptor.post_delete_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_delete_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_delete_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3343,11 +3341,10 @@ def __call__( resp = self._interceptor.post_list_deployment_resource_pools(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_list_deployment_resource_pools_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_list_deployment_resource_pools_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3664,11 +3661,10 @@ def __call__( resp = self._interceptor.post_update_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_update_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_update_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py index ce2986330b..31a3fa2932 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py @@ -933,11 +933,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseCreateDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseCreateDeploymentResourcePool._get_transcoded_request( http_options, request @@ -1008,11 +1007,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1105,11 +1103,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseDeleteDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_delete_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseDeleteDeploymentResourcePool._get_transcoded_request( http_options, request @@ -1175,11 +1172,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1273,11 +1269,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseGetDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_get_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_get_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseGetDeploymentResourcePool._get_transcoded_request( http_options, request @@ -1343,11 +1338,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_deployment_resource_pool_with_metadata( - resp, 
response_metadata + resp, _ = ( + await self._interceptor.post_get_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1443,11 +1437,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseListDeploymentResourcePools._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_deployment_resource_pools( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_deployment_resource_pools( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseListDeploymentResourcePools._get_transcoded_request( http_options, request @@ -1519,11 +1512,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_deployment_resource_pools(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_deployment_resource_pools_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_deployment_resource_pools_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1782,11 +1774,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseUpdateDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseUpdateDeploymentResourcePool._get_transcoded_request( http_options, request @@ -1857,11 +1848,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 8313b60861..2d85152337 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -1516,19 +1516,19 @@ async def sample_mutate_deployed_model(): Required. The DeployedModel to be mutated within the Endpoint. 
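[Editor's note] Many hunks in this diff only reflow calls to the generated REST interceptor hooks (``pre_*``, ``post_*``, ``post_*_with_metadata``). A sketch of how those hooks are meant to be overridden; the class and method names follow the generated transport's conventions seen in these hunks, but treat the exact import path and signatures as assumptions::

    from google.cloud.aiplatform_v1.services.dataset_service.transports.rest import (
        DatasetServiceRestInterceptor,
    )

    class AuditInterceptor(DatasetServiceRestInterceptor):
        def post_restore_dataset_version_with_metadata(self, response, response_metadata):
            # response_metadata is the [(header, str(value)), ...] list the
            # transport builds from response.headers; returning both values
            # lets the hook rewrite either one.
            print(f"restore_dataset_version returned {len(response_metadata)} header(s)")
            return response, response_metadata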
Only the following fields can be mutated: - - ``min_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] - - ``max_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] - - ``required_replica_count`` in - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - - [autoscaling_metric_specs][google.cloud.aiplatform.v1.DedicatedResources.autoscaling_metric_specs] - - ``disable_container_logging`` (v1 only) - - ``enable_container_logging`` (v1beta1 only) + - ``min_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] + - ``max_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] + - ``required_replica_count`` in + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + - [autoscaling_metric_specs][google.cloud.aiplatform.v1.DedicatedResources.autoscaling_metric_specs] + - ``disable_container_logging`` (v1 only) + - ``enable_container_logging`` (v1beta1 only) This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py index 741fab91fd..d162472c70 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -738,11 +738,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = EndpointServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + EndpointServiceClient._read_environment_variables() + ) self._client_cert_source = EndpointServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -2044,19 +2042,19 @@ def sample_mutate_deployed_model(): Required. The DeployedModel to be mutated within the Endpoint. 
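[Editor's note] For the ``mutate_deployed_model`` docstring above (and its copy in ``client.py`` below), a minimal sketch that changes only the mutable replica-count fields; resource names are hypothetical, and the update mask must stay within the fields the docstring lists::

    from google.cloud import aiplatform_v1
    from google.protobuf import field_mask_pb2

    client = aiplatform_v1.EndpointServiceClient()
    deployed_model = aiplatform_v1.DeployedModel(
        id="4567",  # hypothetical ID of a model already deployed to the endpoint
        dedicated_resources=aiplatform_v1.DedicatedResources(
            min_replica_count=1,
            max_replica_count=4,
        ),
    )
    operation = client.mutate_deployed_model(
        endpoint="projects/my-project/locations/us-central1/endpoints/123",
        deployed_model=deployed_model,
        update_mask=field_mask_pb2.FieldMask(
            paths=[
                "dedicated_resources.min_replica_count",
                "dedicated_resources.max_replica_count",
            ]
        ),
    )
    response = operation.result()  # blocks on the long-running operation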
Only the following fields can be mutated: - - ``min_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] - - ``max_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] - - ``required_replica_count`` in - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - - [autoscaling_metric_specs][google.cloud.aiplatform.v1.DedicatedResources.autoscaling_metric_specs] - - ``disable_container_logging`` (v1 only) - - ``enable_container_logging`` (v1beta1 only) + - ``min_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] + - ``max_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] + - ``required_replica_count`` in + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + - [autoscaling_metric_specs][google.cloud.aiplatform.v1.DedicatedResources.autoscaling_metric_specs] + - ``disable_container_logging`` (v1 only) + - ``enable_container_logging`` (v1beta1 only) This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py index 29488aedb4..7de4beb77f 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py @@ -2404,11 +2404,10 @@ async def __call__( _BaseEndpointServiceRestTransport._BaseUpdateEndpointLongRunning._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_endpoint_long_running( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_endpoint_long_running( + request, metadata + ) ) transcoded_request = _BaseEndpointServiceRestTransport._BaseUpdateEndpointLongRunning._get_transcoded_request( http_options, request @@ -2479,11 +2478,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_endpoint_long_running(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_endpoint_long_running_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_endpoint_long_running_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/client.py b/google/cloud/aiplatform_v1/services/evaluation_service/client.py index 736d44826e..73a08c63f1 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/client.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/client.py @@ -600,11 +600,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = EvaluationServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, 
self._universe_domain_env = ( + EvaluationServiceClient._read_environment_variables() + ) self._client_cert_source = EvaluationServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py index cdb11cd2b7..7d4818f6b4 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py @@ -55,9 +55,7 @@ from google.cloud.aiplatform_v1.types import ( feature_online_store as gca_feature_online_store, ) -from google.cloud.aiplatform_v1.types import ( - feature_online_store_admin_service, -) +from google.cloud.aiplatform_v1.types import feature_online_store_admin_service from google.cloud.aiplatform_v1.types import feature_view from google.cloud.aiplatform_v1.types import feature_view as gca_feature_view from google.cloud.aiplatform_v1.types import feature_view_sync @@ -854,11 +852,11 @@ async def sample_update_feature_online_store(): Updatable fields: - - ``labels`` - - ``description`` - - ``bigtable`` - - ``bigtable.auto_scaling`` - - ``bigtable.enable_multi_region_replica`` + - ``labels`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1590,16 +1588,16 @@ async def sample_update_feature_view(): Updatable fields: - - ``labels`` - - ``service_agent_type`` - - ``big_query_source`` - - ``big_query_source.uri`` - - ``big_query_source.entity_id_columns`` - - ``feature_registry_source`` - - ``feature_registry_source.feature_groups`` - - ``sync_config`` - - ``sync_config.cron`` - - ``optimized_config.automatic_resources`` + - ``labels`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` + - ``optimized_config.automatic_resources`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py index eca9390451..a8e6c20722 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py @@ -71,9 +71,7 @@ from google.cloud.aiplatform_v1.types import ( feature_online_store as gca_feature_online_store, ) -from google.cloud.aiplatform_v1.types import ( - feature_online_store_admin_service, -) +from google.cloud.aiplatform_v1.types import feature_online_store_admin_service from google.cloud.aiplatform_v1.types import feature_view from google.cloud.aiplatform_v1.types import feature_view as gca_feature_view from google.cloud.aiplatform_v1.types import feature_view_sync @@ -706,11 +704,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = FeatureOnlineStoreAdminServiceClient._read_environment_variables() + self._use_client_cert, 
self._use_mtls_endpoint, self._universe_domain_env = ( + FeatureOnlineStoreAdminServiceClient._read_environment_variables() + ) self._client_cert_source = ( FeatureOnlineStoreAdminServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert @@ -1355,11 +1351,11 @@ def sample_update_feature_online_store(): Updatable fields: - - ``labels`` - - ``description`` - - ``bigtable`` - - ``bigtable.auto_scaling`` - - ``bigtable.enable_multi_region_replica`` + - ``labels`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2080,16 +2076,16 @@ def sample_update_feature_view(): Updatable fields: - - ``labels`` - - ``service_agent_type`` - - ``big_query_source`` - - ``big_query_source.uri`` - - ``big_query_source.entity_id_columns`` - - ``feature_registry_source`` - - ``feature_registry_source.feature_groups`` - - ``sync_config`` - - ``sync_config.cron`` - - ``optimized_config.automatic_resources`` + - ``labels`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` + - ``optimized_config.automatic_resources`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py index 24ff412733..2a34fd35e0 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py @@ -38,9 +38,7 @@ OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore from google.cloud.aiplatform_v1.types import feature_online_store -from google.cloud.aiplatform_v1.types import ( - feature_online_store_admin_service, -) +from google.cloud.aiplatform_v1.types import feature_online_store_admin_service from google.cloud.aiplatform_v1.types import feature_view from google.cloud.aiplatform_v1.types import feature_view_sync diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py index 092e84db82..174f19323e 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py @@ -29,9 +29,7 @@ import google.protobuf from google.cloud.aiplatform_v1.types import feature_online_store -from google.cloud.aiplatform_v1.types import ( - feature_online_store_admin_service, -) +from google.cloud.aiplatform_v1.types import feature_online_store_admin_service from google.cloud.aiplatform_v1.types import feature_view from google.cloud.aiplatform_v1.types import feature_view_sync from google.cloud.location import locations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py index 2be8585d01..98b494d0ae 100644 --- 
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py
index 24ff412733..2a34fd35e0 100644
--- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py
+++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py
@@ -38,9 +38,7 @@
     OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None]  # type: ignore

 from google.cloud.aiplatform_v1.types import feature_online_store
-from google.cloud.aiplatform_v1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1.types import feature_view
 from google.cloud.aiplatform_v1.types import feature_view_sync
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py
index 092e84db82..174f19323e 100644
--- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py
+++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py
@@ -29,9 +29,7 @@
 import google.protobuf

 from google.cloud.aiplatform_v1.types import feature_online_store
-from google.cloud.aiplatform_v1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1.types import feature_view
 from google.cloud.aiplatform_v1.types import feature_view_sync
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py
index 2be8585d01..98b494d0ae 100644
--- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py
@@ -32,9 +32,7 @@
 import proto  # type: ignore

 from google.cloud.aiplatform_v1.types import feature_online_store
-from google.cloud.aiplatform_v1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1.types import feature_view
 from google.cloud.aiplatform_v1.types import feature_view_sync
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py
index e0a08c05e7..3fe312c0aa 100644
--- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py
@@ -35,9 +35,7 @@
 from grpc.experimental import aio  # type: ignore

 from google.cloud.aiplatform_v1.types import feature_online_store
-from google.cloud.aiplatform_v1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1.types import feature_view
 from google.cloud.aiplatform_v1.types import feature_view_sync
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py
index 5897c7748c..9fe55795ae 100644
--- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py
@@ -38,9 +38,7 @@

 from google.cloud.aiplatform_v1.types import feature_online_store
-from google.cloud.aiplatform_v1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1.types import feature_view
 from google.cloud.aiplatform_v1.types import feature_view_sync
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py
index e7a0e49242..d8d8852e98 100644
--- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py
@@ -51,9 +51,7 @@

 from google.cloud.aiplatform_v1.types import feature_online_store
-from google.cloud.aiplatform_v1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1.types import feature_view
 from google.cloud.aiplatform_v1.types import feature_view_sync
 from google.longrunning import operations_pb2  # type: ignore
@@ -1453,11 +1451,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_feature_online_store(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_feature_online_store_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_feature_online_store_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1783,11 +1780,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_feature_online_store(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_feature_online_store_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_feature_online_store_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2108,11 +2104,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_feature_online_store(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_feature_online_store_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_feature_online_store_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2600,11 +2595,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_feature_online_stores(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_feature_online_stores_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_feature_online_stores_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2929,11 +2923,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_feature_view_syncs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_feature_view_syncs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_feature_view_syncs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3270,11 +3263,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_update_feature_online_store(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_update_feature_online_store_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_update_feature_online_store_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
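Every ``post_*_with_metadata`` call reformatted above is an interceptor hook that receives the deserialized response plus the HTTP response headers and returns both, possibly rewritten. A minimal sketch of overriding one hook on the synchronous REST interceptor (the subclass and its print are illustrative only; the generated default simply returns its arguments unchanged):

    from google.cloud.aiplatform_v1.services.feature_online_store_admin_service.transports.rest import (
        FeatureOnlineStoreAdminServiceRestInterceptor,
    )

    class AuditInterceptor(FeatureOnlineStoreAdminServiceRestInterceptor):
        def post_get_feature_online_store_with_metadata(self, response, metadata):
            # metadata is the list of (header, value) pairs built from the
            # HTTP response, as shown in the hunks above.
            print(f"GetFeatureOnlineStore returned {response.name!r}")
            return response, metadata

An instance of such a subclass can be handed to the REST transport via its ``interceptor`` argument.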
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py
index 9f36737609..135afb37c8 100644
--- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py
@@ -28,9 +28,7 @@

 from google.cloud.aiplatform_v1.types import feature_online_store
-from google.cloud.aiplatform_v1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1.types import feature_view
 from google.cloud.aiplatform_v1.types import feature_view_sync
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py
index 6b5be6debb..ded1e2a7f9 100644
--- a/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py
+++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py
@@ -632,11 +632,9 @@ def __init__(
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = FeatureOnlineStoreServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            FeatureOnlineStoreServiceClient._read_environment_variables()
+        )
         self._client_cert_source = (
             FeatureOnlineStoreServiceClient._get_client_cert_source(
                 self._client_options.client_cert_source, self._use_client_cert
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_asyncio.py
index 9685cc1419..355db858d7 100644
--- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_asyncio.py
@@ -944,11 +944,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_search_nearest_entities(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_search_nearest_entities_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_search_nearest_entities_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py
index df42e9943a..c2fd1c0f47 100644
--- a/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py
@@ -47,15 +47,11 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1.services.feature_registry_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1.services.feature_registry_service import pagers
 from google.cloud.aiplatform_v1.types import feature
 from google.cloud.aiplatform_v1.types import feature as gca_feature
 from google.cloud.aiplatform_v1.types import feature_group
-from google.cloud.aiplatform_v1.types import (
-    feature_group as gca_feature_group,
-)
+from google.cloud.aiplatform_v1.types import feature_group as gca_feature_group
 from google.cloud.aiplatform_v1.types import feature_registry_service
 from google.cloud.aiplatform_v1.types import featurestore_service
 from google.cloud.aiplatform_v1.types import operation as gca_operation
@@ -792,10 +788,10 @@ async def sample_update_feature_group():

                 Updatable fields:

-                -  ``labels``
-                -  ``description``
-                -  ``big_query``
-                -  ``big_query.entity_id_columns``
+                - ``labels``
+                - ``description``
+                - ``big_query``
+                - ``big_query.entity_id_columns``

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -1660,12 +1656,12 @@ async def sample_update_feature():

                 Updatable fields:

-                -  ``description``
-                -  ``labels``
-                -  ``disable_monitoring`` (Not supported for
-                   FeatureRegistryService Feature)
-                -  ``point_of_contact`` (Not supported for
-                   FeaturestoreService FeatureStore)
+                - ``description``
+                - ``labels``
+                - ``disable_monitoring`` (Not supported for
+                  FeatureRegistryService Feature)
+                - ``point_of_contact`` (Not supported for
+                  FeaturestoreService FeatureStore)

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/client.py b/google/cloud/aiplatform_v1/services/feature_registry_service/client.py
index fa50cbc647..98acc68b18 100644
--- a/google/cloud/aiplatform_v1/services/feature_registry_service/client.py
+++ b/google/cloud/aiplatform_v1/services/feature_registry_service/client.py
@@ -63,15 +63,11 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1.services.feature_registry_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1.services.feature_registry_service import pagers
 from google.cloud.aiplatform_v1.types import feature
 from google.cloud.aiplatform_v1.types import feature as gca_feature
 from google.cloud.aiplatform_v1.types import feature_group
-from google.cloud.aiplatform_v1.types import (
-    feature_group as gca_feature_group,
-)
+from google.cloud.aiplatform_v1.types import feature_group as gca_feature_group
 from google.cloud.aiplatform_v1.types import feature_registry_service
 from google.cloud.aiplatform_v1.types import featurestore_service
 from google.cloud.aiplatform_v1.types import operation as gca_operation
@@ -668,11 +664,9 @@ def __init__(
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = FeatureRegistryServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            FeatureRegistryServiceClient._read_environment_variables()
+        )
         self._client_cert_source = FeatureRegistryServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
@@ -1269,10 +1263,10 @@ def sample_update_feature_group():

                 Updatable fields:

-                -  ``labels``
-                -  ``description``
-                -  ``big_query``
-                -  ``big_query.entity_id_columns``
+                - ``labels``
+                - ``description``
+                - ``big_query``
+                - ``big_query.entity_id_columns``

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -2119,12 +2113,12 @@ def sample_update_feature():

                 Updatable fields:

-                -  ``description``
-                -  ``labels``
-                -  ``disable_monitoring`` (Not supported for
-                   FeatureRegistryService Feature)
-                -  ``point_of_contact`` (Not supported for
-                   FeaturestoreService FeatureStore)
+                - ``description``
+                - ``labels``
+                - ``disable_monitoring`` (Not supported for
+                  FeatureRegistryService Feature)
+                - ``point_of_contact`` (Not supported for
+                  FeaturestoreService FeatureStore)

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
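The ``_read_environment_variables`` hunks in these client files all apply one mechanical rewrite: the parentheses move from the assignment-target list to the right-hand call. The two spellings are semantically identical tuple unpackings, as this standalone sketch shows (the helper below is a stand-in, not the generated API):

    def _read_environment_variables():
        # Stand-in returning (use_client_cert, use_mtls_endpoint, universe_domain_env).
        return False, "auto", ""

    # Old style: parentheses wrap the assignment targets.
    (
        use_client_cert,
        use_mtls_endpoint,
        universe_domain_env,
    ) = _read_environment_variables()

    # New style: the same three-way unpacking, wrapping the call instead.
    use_client_cert, use_mtls_endpoint, universe_domain_env = (
        _read_environment_variables()
    )

    assert (use_client_cert, use_mtls_endpoint, universe_domain_env) == (False, "auto", "")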
diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py
index c211d6b7ae..c4669a443e 100644
--- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py
+++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py
@@ -638,11 +638,9 @@ def __init__(
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = FeaturestoreOnlineServingServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            FeaturestoreOnlineServingServiceClient._read_environment_variables()
+        )
         self._client_cert_source = (
             FeaturestoreOnlineServingServiceClient._get_client_cert_source(
                 self._client_options.client_cert_source, self._use_client_cert
diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py
index e425991fd2..4a61f87648 100644
--- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py
@@ -898,12 +898,27 @@ def __call__(

             resp = self._interceptor.post_streaming_read_feature_values(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_streaming_read_feature_values_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_streaming_read_feature_values_with_metadata(
+                    resp, response_metadata
+                )
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient.streaming_read_feature_values",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService",
+                        "rpcName": "StreamingReadFeatureValues",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
             return resp

     class _WriteFeatureValues(
diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_asyncio.py
index cd2a1c7f6f..98d0566dcc 100644
--- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_asyncio.py
@@ -910,11 +910,10 @@ async def __call__(
             http_options = (
                 _BaseFeaturestoreOnlineServingServiceRestTransport._BaseStreamingReadFeatureValues._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_streaming_read_feature_values(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_streaming_read_feature_values(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseFeaturestoreOnlineServingServiceRestTransport._BaseStreamingReadFeatureValues._get_transcoded_request(
                 http_options, request
@@ -984,12 +983,28 @@ async def __call__(
             )
             resp = await self._interceptor.post_streaming_read_feature_values(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_streaming_read_feature_values_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_streaming_read_feature_values_with_metadata(
+                    resp, response_metadata
+                )
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": "OK",  # need to obtain this properly
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient.streaming_read_feature_values",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService",
+                        "rpcName": "StreamingReadFeatureValues",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp

     class _WriteFeatureValues(
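The blocks added in the two streaming hunks above emit a record only when ``CLIENT_LOGGING_SUPPORTED`` is true and the module's logger is enabled for DEBUG. Since ``_LOGGER`` is an ordinary stdlib ``logging`` logger, a hedged sketch of switching those records on from application code (assuming a google-api-core version with client logging support):

    import logging

    # Send DEBUG records, including the "Received response for ..." entries
    # shown above, to stderr via the root handler.
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger("google.cloud.aiplatform_v1").setLevel(logging.DEBUG)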
diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py
index 5033ced735..a35ba31d84 100644
--- a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py
@@ -791,10 +791,10 @@ async def sample_update_featurestore():

                 Updatable fields:

-                -  ``labels``
-                -  ``online_serving_config.fixed_node_count``
-                -  ``online_serving_config.scaling``
-                -  ``online_storage_ttl_days``
+                - ``labels``
+                - ``online_serving_config.fixed_node_count``
+                - ``online_serving_config.scaling``
+                - ``online_storage_ttl_days``

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -1494,16 +1494,16 @@ async def sample_update_entity_type():

                 Updatable fields:

-                -  ``description``
-                -  ``labels``
-                -  ``monitoring_config.snapshot_analysis.disabled``
-                -  ``monitoring_config.snapshot_analysis.monitoring_interval_days``
-                -  ``monitoring_config.snapshot_analysis.staleness_days``
-                -  ``monitoring_config.import_features_analysis.state``
-                -  ``monitoring_config.import_features_analysis.anomaly_detection_baseline``
-                -  ``monitoring_config.numerical_threshold_config.value``
-                -  ``monitoring_config.categorical_threshold_config.value``
-                -  ``offline_storage_ttl_days``
+                - ``description``
+                - ``labels``
+                - ``monitoring_config.snapshot_analysis.disabled``
+                - ``monitoring_config.snapshot_analysis.monitoring_interval_days``
+                - ``monitoring_config.snapshot_analysis.staleness_days``
+                - ``monitoring_config.import_features_analysis.state``
+                - ``monitoring_config.import_features_analysis.anomaly_detection_baseline``
+                - ``monitoring_config.numerical_threshold_config.value``
+                - ``monitoring_config.categorical_threshold_config.value``
+                - ``offline_storage_ttl_days``

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -2359,12 +2359,12 @@ async def sample_update_feature():

                 Updatable fields:

-                -  ``description``
-                -  ``labels``
-                -  ``disable_monitoring`` (Not supported for
-                   FeatureRegistryService Feature)
-                -  ``point_of_contact`` (Not supported for
-                   FeaturestoreService FeatureStore)
+                - ``description``
+                - ``labels``
+                - ``disable_monitoring`` (Not supported for
+                  FeatureRegistryService Feature)
+                - ``point_of_contact`` (Not supported for
+                  FeaturestoreService FeatureStore)

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -3241,15 +3241,15 @@ async def sample_search_features():
                 to a sequence of words (i.e. tokens) for
                 comparison. This is done by:

-                -  Removing leading/trailing whitespace and tokenizing
-                   the search value. Characters that are not one of
-                   alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or
-                   asterisk ``*`` are treated as delimiters for tokens.
-                   ``*`` is treated as a wildcard that matches
-                   characters within a token.
-                -  Ignoring case.
-                -  Prepending an asterisk to the first and appending an
-                   asterisk to the last token in QUERY.
+                - Removing leading/trailing whitespace and tokenizing
+                  the search value. Characters that are not one of
+                  alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or
+                  asterisk ``*`` are treated as delimiters for tokens.
+                  ``*`` is treated as a wildcard that matches characters
+                  within a token.
+                - Ignoring case.
+                - Prepending an asterisk to the first and appending an
+                  asterisk to the last token in QUERY.

                 A QUERY must be either a singular token or a
                 phrase. A phrase is one or multiple words enclosed in double
@@ -3259,47 +3259,47 @@ async def sample_search_features():
                 Supported FIELDs for field-restricted queries:

-                -  ``feature_id``
-                -  ``description``
-                -  ``entity_type_id``
+                - ``feature_id``
+                - ``description``
+                - ``entity_type_id``

                 Examples:

-                -  ``feature_id: foo`` --> Matches a Feature with ID
-                   containing the substring ``foo`` (eg. ``foo``,
-                   ``foofeature``, ``barfoo``).
-                -  ``feature_id: foo*feature`` --> Matches a Feature
-                   with ID containing the substring ``foo*feature`` (eg.
-                   ``foobarfeature``).
-                -  ``feature_id: foo AND description: bar`` --> Matches
-                   a Feature with ID containing the substring ``foo``
-                   and description containing the substring ``bar``.
+                - ``feature_id: foo`` --> Matches a Feature with ID
+                  containing the substring ``foo`` (eg. ``foo``,
+                  ``foofeature``, ``barfoo``).
+                - ``feature_id: foo*feature`` --> Matches a Feature with
+                  ID containing the substring ``foo*feature`` (eg.
+                  ``foobarfeature``).
+                - ``feature_id: foo AND description: bar`` --> Matches a
+                  Feature with ID containing the substring ``foo`` and
+                  description containing the substring ``bar``.

                 Besides field queries, the following exact-match
                 filters are supported. The exact-match filters do not
                 support wildcards. Unlike field-restricted queries,
                 exact-match filters are case-sensitive.

-                -  ``feature_id``: Supports = comparisons.
-                -  ``description``: Supports = comparisons. Multi-token
-                   filters should be enclosed in quotes.
-                -  ``entity_type_id``: Supports = comparisons.
-                -  ``value_type``: Supports = and != comparisons.
-                -  ``labels``: Supports key-value equality as well as
-                   key presence.
-                -  ``featurestore_id``: Supports = comparisons.
+                - ``feature_id``: Supports = comparisons.
+                - ``description``: Supports = comparisons. Multi-token
+                  filters should be enclosed in quotes.
+                - ``entity_type_id``: Supports = comparisons.
+                - ``value_type``: Supports = and != comparisons.
+                - ``labels``: Supports key-value equality as well as key
+                  presence.
+                - ``featurestore_id``: Supports = comparisons.

                 Examples:

-                -  ``description = "foo bar"`` --> Any Feature with
-                   description exactly equal to ``foo bar``
-                -  ``value_type = DOUBLE`` --> Features whose type is
-                   DOUBLE.
-                -  ``labels.active = yes AND labels.env = prod`` -->
-                   Features having both (active: yes) and (env: prod)
-                   labels.
-                -  ``labels.env: *`` --> Any Feature which has a label
-                   with ``env`` as the key.
+                - ``description = "foo bar"`` --> Any Feature with
+                  description exactly equal to ``foo bar``
+                - ``value_type = DOUBLE`` --> Features whose type is
+                  DOUBLE.
+                - ``labels.active = yes AND labels.env = prod`` -->
+                  Features having both (active: yes) and (env: prod)
+                  labels.
+                - ``labels.env: *`` --> Any Feature which has a label
+                  with ``env`` as the key.

                 This corresponds to the ``query`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_service/client.py
index 3cbbe2a5df..36486f57e2 100644
--- a/google/cloud/aiplatform_v1/services/featurestore_service/client.py
+++ b/google/cloud/aiplatform_v1/services/featurestore_service/client.py
@@ -689,11 +687,9 @@ def __init__(
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = FeaturestoreServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            FeaturestoreServiceClient._read_environment_variables()
+        )
         self._client_cert_source = FeaturestoreServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
@@ -1288,10 +1286,10 @@ def sample_update_featurestore():

                 Updatable fields:

-                -  ``labels``
-                -  ``online_serving_config.fixed_node_count``
-                -  ``online_serving_config.scaling``
-                -  ``online_storage_ttl_days``
+                - ``labels``
+                - ``online_serving_config.fixed_node_count``
+                - ``online_serving_config.scaling``
+                - ``online_storage_ttl_days``

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -1976,16 +1974,16 @@ def sample_update_entity_type():

                 Updatable fields:

-                -  ``description``
-                -  ``labels``
-                -  ``monitoring_config.snapshot_analysis.disabled``
-                -  ``monitoring_config.snapshot_analysis.monitoring_interval_days``
-                -  ``monitoring_config.snapshot_analysis.staleness_days``
-                -  ``monitoring_config.import_features_analysis.state``
-                -  ``monitoring_config.import_features_analysis.anomaly_detection_baseline``
-                -  ``monitoring_config.numerical_threshold_config.value``
-                -  ``monitoring_config.categorical_threshold_config.value``
-                -  ``offline_storage_ttl_days``
+                - ``description``
+                - ``labels``
+                - ``monitoring_config.snapshot_analysis.disabled``
+                - ``monitoring_config.snapshot_analysis.monitoring_interval_days``
+                - ``monitoring_config.snapshot_analysis.staleness_days``
+                - ``monitoring_config.import_features_analysis.state``
+                - ``monitoring_config.import_features_analysis.anomaly_detection_baseline``
+                - ``monitoring_config.numerical_threshold_config.value``
+                - ``monitoring_config.categorical_threshold_config.value``
+                - ``offline_storage_ttl_days``

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -2823,12 +2821,12 @@ def sample_update_feature():

                 Updatable fields:

-                -  ``description``
-                -  ``labels``
-                -  ``disable_monitoring`` (Not supported for
-                   FeatureRegistryService Feature)
-                -  ``point_of_contact`` (Not supported for
-                   FeaturestoreService FeatureStore)
+                - ``description``
+                - ``labels``
+                - ``disable_monitoring`` (Not supported for
+                  FeatureRegistryService Feature)
+                - ``point_of_contact`` (Not supported for
+                  FeaturestoreService FeatureStore)

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -3689,15 +3687,15 @@ def sample_search_features():
                 to a sequence of words (i.e. tokens) for
                 comparison. This is done by:

-                -  Removing leading/trailing whitespace and tokenizing
-                   the search value. Characters that are not one of
-                   alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or
-                   asterisk ``*`` are treated as delimiters for tokens.
-                   ``*`` is treated as a wildcard that matches
-                   characters within a token.
-                -  Ignoring case.
-                -  Prepending an asterisk to the first and appending an
-                   asterisk to the last token in QUERY.
+                - Removing leading/trailing whitespace and tokenizing
+                  the search value. Characters that are not one of
+                  alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or
+                  asterisk ``*`` are treated as delimiters for tokens.
+                  ``*`` is treated as a wildcard that matches characters
+                  within a token.
+                - Ignoring case.
+                - Prepending an asterisk to the first and appending an
+                  asterisk to the last token in QUERY.

                 A QUERY must be either a singular token or a
                 phrase. A phrase is one or multiple words enclosed in double
@@ -3707,47 +3705,47 @@ def sample_search_features():
                 Supported FIELDs for field-restricted queries:

-                -  ``feature_id``
-                -  ``description``
-                -  ``entity_type_id``
+                - ``feature_id``
+                - ``description``
+                - ``entity_type_id``

                 Examples:

-                -  ``feature_id: foo`` --> Matches a Feature with ID
-                   containing the substring ``foo`` (eg. ``foo``,
-                   ``foofeature``, ``barfoo``).
-                -  ``feature_id: foo*feature`` --> Matches a Feature
-                   with ID containing the substring ``foo*feature`` (eg.
-                   ``foobarfeature``).
-                -  ``feature_id: foo AND description: bar`` --> Matches
-                   a Feature with ID containing the substring ``foo``
-                   and description containing the substring ``bar``.
+                - ``feature_id: foo`` --> Matches a Feature with ID
+                  containing the substring ``foo`` (eg. ``foo``,
+                  ``foofeature``, ``barfoo``).
+                - ``feature_id: foo*feature`` --> Matches a Feature with
+                  ID containing the substring ``foo*feature`` (eg.
+                  ``foobarfeature``).
+                - ``feature_id: foo AND description: bar`` --> Matches a
+                  Feature with ID containing the substring ``foo`` and
+                  description containing the substring ``bar``.

                 Besides field queries, the following exact-match
                 filters are supported. The exact-match filters do not
                 support wildcards. Unlike field-restricted queries,
                 exact-match filters are case-sensitive.

-                -  ``feature_id``: Supports = comparisons.
-                -  ``description``: Supports = comparisons. Multi-token
-                   filters should be enclosed in quotes.
-                -  ``entity_type_id``: Supports = comparisons.
-                -  ``value_type``: Supports = and != comparisons.
-                -  ``labels``: Supports key-value equality as well as
-                   key presence.
-                -  ``featurestore_id``: Supports = comparisons.
+                - ``feature_id``: Supports = comparisons.
+                - ``description``: Supports = comparisons. Multi-token
+                  filters should be enclosed in quotes.
+                - ``entity_type_id``: Supports = comparisons.
+                - ``value_type``: Supports = and != comparisons.
+                - ``labels``: Supports key-value equality as well as key
+                  presence.
+                - ``featurestore_id``: Supports = comparisons.

                 Examples:

-                -  ``description = "foo bar"`` --> Any Feature with
-                   description exactly equal to ``foo bar``
-                -  ``value_type = DOUBLE`` --> Features whose type is
-                   DOUBLE.
-                -  ``labels.active = yes AND labels.env = prod`` -->
-                   Features having both (active: yes) and (env: prod)
-                   labels.
-                -  ``labels.env: *`` --> Any Feature which has a label
-                   with ``env`` as the key.
+                - ``description = "foo bar"`` --> Any Feature with
+                  description exactly equal to ``foo bar``
+                - ``value_type = DOUBLE`` --> Features whose type is
+                  DOUBLE.
+                - ``labels.active = yes AND labels.env = prod`` -->
+                  Features having both (active: yes) and (env: prod)
+                  labels.
+                - ``labels.env: *`` --> Any Feature which has a label
+                  with ``env`` as the key.

                 This corresponds to the ``query`` field
                 on the ``request`` instance; if ``request`` is provided, this
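The QUERY grammar documented twice above (async and sync clients) drives ``search_features``. A short sketch using query strings taken directly from the docstring examples (the project and location are placeholders; ``location`` and ``query`` are the documented request fields):

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.FeaturestoreServiceClient()
    parent = "projects/my-project/locations/us-central1"

    # Field-restricted query: case-insensitive substring matching per field.
    for feature in client.search_features(
        location=parent, query="feature_id: foo AND description: bar"
    ):
        print(feature.name)

    # Exact-match filter: case-sensitive, no wildcards.
    for feature in client.search_features(location=parent, query="value_type = DOUBLE"):
        print(feature.name)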
diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py
index 5098c678d7..9602b1526f 100644
--- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py
@@ -2098,11 +2098,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_batch_read_feature_values(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_batch_read_feature_values_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_batch_read_feature_values_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py
index f104a6f3c7..8dd9d772a7 100644
--- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py
@@ -47,9 +47,7 @@
 from google.cloud.aiplatform_v1.services.gen_ai_cache_service import pagers
 from google.cloud.aiplatform_v1.types import cached_content
-from google.cloud.aiplatform_v1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1.types import content
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import gen_ai_cache_service
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py
index 1b86298d1b..a610b5bdaf 100644
--- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py
+++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py
@@ -63,9 +63,7 @@
 from google.cloud.aiplatform_v1.services.gen_ai_cache_service import pagers
 from google.cloud.aiplatform_v1.types import cached_content
-from google.cloud.aiplatform_v1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1.types import content
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import gen_ai_cache_service
@@ -655,11 +653,9 @@ def __init__(
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = GenAiCacheServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            GenAiCacheServiceClient._read_environment_variables()
+        )
         self._client_cert_source = GenAiCacheServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py
index a042ce3aff..2da4fe30d4 100644
--- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py
+++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py
@@ -28,9 +28,7 @@
 import google.protobuf

 from google.cloud.aiplatform_v1.types import cached_content
-from google.cloud.aiplatform_v1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1.types import gen_ai_cache_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py
index 154386373b..3d355fc3d1 100644
--- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py
@@ -31,9 +31,7 @@
 import proto  # type: ignore

 from google.cloud.aiplatform_v1.types import cached_content
-from google.cloud.aiplatform_v1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1.types import gen_ai_cache_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py
index bead2bdaa6..5e7972915d 100644
--- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py
@@ -34,9 +34,7 @@
 from grpc.experimental import aio  # type: ignore

 from google.cloud.aiplatform_v1.types import cached_content
-from google.cloud.aiplatform_v1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1.types import gen_ai_cache_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py
index cbb60ebb02..2946ea0c0b 100644
--- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py
@@ -37,9 +37,7 @@

 from google.cloud.aiplatform_v1.types import cached_content
-from google.cloud.aiplatform_v1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1.types import gen_ai_cache_service
 from google.protobuf import empty_pb2  # type: ignore
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py
index 67343c7b9c..774842c79f 100644
--- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py
@@ -49,9 +49,7 @@

 from google.cloud.aiplatform_v1.types import cached_content
-from google.cloud.aiplatform_v1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1.types import gen_ai_cache_service
 from google.protobuf import empty_pb2  # type: ignore
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py
index 7c3d761912..723bd93d46 100644
--- a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py
@@ -28,9 +28,7 @@

 from google.cloud.aiplatform_v1.types import cached_content
-from google.cloud.aiplatform_v1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1.types import gen_ai_cache_service
 from google.protobuf import empty_pb2  # type: ignore
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py
index a4ea54b000..be6c5d313f 100644
--- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py
+++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py
@@ -721,11 +719,9 @@ def __init__(
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = GenAiTuningServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            GenAiTuningServiceClient._read_environment_variables()
+        )
         self._client_cert_source = GenAiTuningServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py
index 8f9a542686..ac47a2e3ae 100644
--- a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py
@@ -50,9 +50,7 @@
 from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import index_endpoint
-from google.cloud.aiplatform_v1.types import (
-    index_endpoint as gca_index_endpoint,
-)
+from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1.types import index_endpoint_service
 from google.cloud.aiplatform_v1.types import operation as gca_operation
 from google.cloud.aiplatform_v1.types import service_networking
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py
index b82bafabba..804eb76826 100644
--- a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py
+++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py
@@ -66,9 +66,7 @@
 from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import index_endpoint
-from google.cloud.aiplatform_v1.types import (
-    index_endpoint as gca_index_endpoint,
-)
+from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1.types import index_endpoint_service
 from google.cloud.aiplatform_v1.types import operation as gca_operation
 from google.cloud.aiplatform_v1.types import service_networking
@@ -679,11 +677,9 @@ def __init__(
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = IndexEndpointServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            IndexEndpointServiceClient._read_environment_variables()
+        )
         self._client_cert_source = IndexEndpointServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py
index 3d98fd4348..0c9a56d5cf 100644
--- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py
+++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py
@@ -29,9 +29,7 @@
 import google.protobuf

 from google.cloud.aiplatform_v1.types import index_endpoint
-from google.cloud.aiplatform_v1.types import (
-    index_endpoint as gca_index_endpoint,
-)
+from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1.types import index_endpoint_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py
index 06ea19c147..bdd6e1dac5 100644
--- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py
@@ -32,9 +32,7 @@
 import proto  # type: ignore

 from google.cloud.aiplatform_v1.types import index_endpoint
-from google.cloud.aiplatform_v1.types import (
-    index_endpoint as gca_index_endpoint,
-)
+from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1.types import index_endpoint_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py
index 576d2bc035..26b1ccd58d 100644
--- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py
@@ -35,9 +35,7 @@
 from grpc.experimental import aio  # type: ignore

 from google.cloud.aiplatform_v1.types import index_endpoint
-from google.cloud.aiplatform_v1.types import (
-    index_endpoint as gca_index_endpoint,
-)
+from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1.types import index_endpoint_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py
index 68f496bccd..492de5b2ff 100644
--- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py
@@ -38,9 +38,7 @@

 from google.cloud.aiplatform_v1.types import index_endpoint
-from google.cloud.aiplatform_v1.types import (
-    index_endpoint as gca_index_endpoint,
-)
+from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1.types import index_endpoint_service
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py
index f79d24be7a..de9cb8d2ad 100644
--- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py
@@ -51,9 +51,7 @@

 from google.cloud.aiplatform_v1.types import index_endpoint
-from google.cloud.aiplatform_v1.types import (
-    index_endpoint as gca_index_endpoint,
-)
+from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1.types import index_endpoint_service
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py
index 055945e912..5a8c949ec6 100644
--- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py
@@ -28,9 +28,7 @@

 from google.cloud.aiplatform_v1.types import index_endpoint
-from google.cloud.aiplatform_v1.types import (
-    index_endpoint as gca_index_endpoint,
-)
+from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1.types import index_endpoint_service
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/index_service/client.py b/google/cloud/aiplatform_v1/services/index_service/client.py
index 773814a3d5..9ac60cf865 100644
--- a/google/cloud/aiplatform_v1/services/index_service/client.py
+++ b/google/cloud/aiplatform_v1/services/index_service/client.py
@@ -652,11 +650,9 @@ def __init__(
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = IndexServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            IndexServiceClient._read_environment_variables()
+        )
         self._client_cert_source = IndexServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py
index 5b7e22bcef..7925344a65 100644
--- a/google/cloud/aiplatform_v1/services/job_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py
@@ -56,9 +56,7 @@
 from google.cloud.aiplatform_v1.types import custom_job
 from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
 from google.cloud.aiplatform_v1.types import data_labeling_job
-from google.cloud.aiplatform_v1.types import (
-    data_labeling_job as gca_data_labeling_job,
-)
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import explanation
 from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
@@ -4345,18 +4343,17 @@ async def sample_update_model_deployment_monitoring_job():

                 Updatable fields:

-                -  ``display_name``
-                -  ``model_deployment_monitoring_schedule_config``
-                -  ``model_monitoring_alert_config``
-                -  ``logging_sampling_strategy``
-                -  ``labels``
-                -  ``log_ttl``
-                -  ``enable_monitoring_pipeline_logs`` . and
-                -  ``model_deployment_monitoring_objective_configs`` .
-                   or
-                -  ``model_deployment_monitoring_objective_configs.objective_config.training_dataset``
-                -  ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config``
-                -  ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config``
+                - ``display_name``
+                - ``model_deployment_monitoring_schedule_config``
+                - ``model_monitoring_alert_config``
+                - ``logging_sampling_strategy``
+                - ``labels``
+                - ``log_ttl``
+                - ``enable_monitoring_pipeline_logs`` . and
+                - ``model_deployment_monitoring_objective_configs`` . or
+                - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset``
+                - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config``
+                - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config``

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py
index c8c4a39ef0..5569a33955 100644
--- a/google/cloud/aiplatform_v1/services/job_service/client.py
+++ b/google/cloud/aiplatform_v1/services/job_service/client.py
@@ -72,9 +70,7 @@
 from google.cloud.aiplatform_v1.types import custom_job
 from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
 from google.cloud.aiplatform_v1.types import data_labeling_job
-from google.cloud.aiplatform_v1.types import (
-    data_labeling_job as gca_data_labeling_job,
-)
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import explanation
 from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
@@ -1034,11 +1032,9 @@ def __init__(
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = JobServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            JobServiceClient._read_environment_variables()
+        )
         self._client_cert_source = JobServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
@@ -5069,18 +5065,17 @@ def sample_update_model_deployment_monitoring_job():

                 Updatable fields:

-                -  ``display_name``
-                -  ``model_deployment_monitoring_schedule_config``
-                -  ``model_monitoring_alert_config``
-                -  ``logging_sampling_strategy``
-                -  ``labels``
-                -  ``log_ttl``
-                -  ``enable_monitoring_pipeline_logs`` . and
-                -  ``model_deployment_monitoring_objective_configs`` .
-                   or
-                -  ``model_deployment_monitoring_objective_configs.objective_config.training_dataset``
-                -  ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config``
-                -  ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config``
+                - ``display_name``
+                - ``model_deployment_monitoring_schedule_config``
+                - ``model_monitoring_alert_config``
+                - ``logging_sampling_strategy``
+                - ``labels``
+                - ``log_ttl``
+                - ``enable_monitoring_pipeline_logs`` . and
+                - ``model_deployment_monitoring_objective_configs`` . or
+                - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset``
+                - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config``
+                - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config``

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1/services/job_service/transports/base.py
index 767b9da265..1ce2c70899 100644
--- a/google/cloud/aiplatform_v1/services/job_service/transports/base.py
+++ b/google/cloud/aiplatform_v1/services/job_service/transports/base.py
@@ -35,9 +35,7 @@
 from google.cloud.aiplatform_v1.types import custom_job
 from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
 from google.cloud.aiplatform_v1.types import data_labeling_job
-from google.cloud.aiplatform_v1.types import (
-    data_labeling_job as gca_data_labeling_job,
-)
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
 from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
 from google.cloud.aiplatform_v1.types import (
     hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py
index 9e1fea3932..7e67f00571 100644
--- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py
@@ -38,9 +38,7 @@
 from google.cloud.aiplatform_v1.types import custom_job
 from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
 from google.cloud.aiplatform_v1.types import data_labeling_job
-from google.cloud.aiplatform_v1.types import (
-    data_labeling_job as gca_data_labeling_job,
-)
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
 from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
 from google.cloud.aiplatform_v1.types import (
     hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py
index 55b67f9597..770271b885 100644
--- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py
@@ -41,9 +41,7 @@
 from google.cloud.aiplatform_v1.types import custom_job
 from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
 from google.cloud.aiplatform_v1.types import data_labeling_job
-from google.cloud.aiplatform_v1.types import (
-    data_labeling_job as gca_data_labeling_job,
-)
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
 from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
 from google.cloud.aiplatform_v1.types import (
     hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
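The monitoring-job mask documented above accepts both top-level fields and dotted nested paths. A hedged sketch of a partial update restricted to one field (the job name is a placeholder; the keyword arguments mirror the documented ``model_deployment_monitoring_job`` and ``update_mask`` request fields):

    from google.cloud import aiplatform_v1
    from google.protobuf import field_mask_pb2

    client = aiplatform_v1.JobServiceClient()
    job = aiplatform_v1.ModelDeploymentMonitoringJob(
        name="projects/my-project/locations/us-central1/modelDeploymentMonitoringJobs/123",
        display_name="renamed-monitoring-job",
    )
    # Nested paths such as
    # "model_deployment_monitoring_objective_configs.objective_config.training_dataset"
    # are also valid mask entries, per the field list above.
    operation = client.update_model_deployment_monitoring_job(
        model_deployment_monitoring_job=job,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
    print(operation.result().display_name)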
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest.py
index 3b2bd4a76c..b334da0d9f 100644
--- a/google/cloud/aiplatform_v1/services/job_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1/services/job_service/transports/rest.py
@@ -44,9 +44,7 @@
 from google.cloud.aiplatform_v1.types import custom_job
 from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
 from google.cloud.aiplatform_v1.types import data_labeling_job
-from google.cloud.aiplatform_v1.types import (
-    data_labeling_job as gca_data_labeling_job,
-)
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
 from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
 from google.cloud.aiplatform_v1.types import (
     hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
@@ -5355,11 +5353,10 @@ def __call__(
             resp = self._interceptor.post_create_hyperparameter_tuning_job(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_create_hyperparameter_tuning_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_create_hyperparameter_tuning_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -5456,11 +5453,10 @@ def __call__(
                 _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_create_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_create_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -5530,11 +5526,10 @@ def __call__(
             resp = self._interceptor.post_create_model_deployment_monitoring_job(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_create_model_deployment_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_create_model_deployment_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6296,11 +6291,10 @@ def __call__(
             resp = self._interceptor.post_delete_hyperparameter_tuning_job(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_delete_hyperparameter_tuning_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_delete_hyperparameter_tuning_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6390,11 +6384,10 @@ def __call__(
                 _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_delete_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_delete_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -6453,11 +6446,10 @@ def __call__(
             resp = self._interceptor.post_delete_model_deployment_monitoring_job(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_delete_model_deployment_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_delete_model_deployment_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -7230,11 +7222,10 @@ def __call__(
             resp = self._interceptor.post_get_hyperparameter_tuning_job(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_get_hyperparameter_tuning_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_get_hyperparameter_tuning_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -7330,11 +7321,10 @@ def __call__(
                 _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_get_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_get_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -7399,11 +7389,10 @@ def __call__(
             resp = self._interceptor.post_get_model_deployment_monitoring_job(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_get_model_deployment_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_get_model_deployment_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -8319,11 +8308,10 @@ def __call__(
             resp = self._interceptor.post_list_hyperparameter_tuning_jobs(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_list_hyperparameter_tuning_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_list_hyperparameter_tuning_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -8416,11 +8404,10 @@ def __call__(
                 _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_list_model_deployment_monitoring_jobs(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_list_model_deployment_monitoring_jobs(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_transcoded_request(
                 http_options, request
@@ -8481,11 +8468,10 @@ def __call__(
             resp = self._interceptor.post_list_model_deployment_monitoring_jobs(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_list_model_deployment_monitoring_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_list_model_deployment_monitoring_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -8877,11 +8863,10 @@ def __call__(
                 _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_pause_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_pause_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -8998,11 +8983,10 @@ def __call__(
                 _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_resume_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_resume_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -9127,11 +9111,10 @@ def __call__(
                 _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_transcoded_request(
                 http_options, request
@@ -9203,11 +9186,10 @@ def __call__(
                 resp
             )
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_search_model_deployment_monitoring_stats_anomalies_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_search_model_deployment_monitoring_stats_anomalies_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -9300,11 +9282,10 @@ def __call__(
                 _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_update_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_update_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -9368,11 +9349,10 @@ def __call__(
             resp = self._interceptor.post_update_model_deployment_monitoring_job(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_update_model_deployment_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_update_model_deployment_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
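Every rest.py hunk above applies the same behavior-preserving rewrite: a tuple-unpacking assignment whose targets were exploded one per line is reflowed so the targets stay on one line and the long right-hand call is wrapped in parentheses instead. A minimal sketch with a hypothetical interceptor stub (the names mirror the generated code, but the stub itself is illustrative, not the library's API):

    class _Interceptor:
        # Illustrative stand-in for the generated pre_* interceptor hooks.
        def pre_create_job(self, request: dict, metadata: list) -> tuple:
            return request, metadata

    interceptor = _Interceptor()
    request, metadata = {"name": "job"}, [("x-goog-request-params", "...")]

    # Old layout: targets exploded across lines.
    (
        request,
        metadata,
    ) = interceptor.pre_create_job(request, metadata)

    # New layout: targets inline, right-hand side parenthesized so it can wrap.
    request, metadata = (
        interceptor.pre_create_job(request, metadata)
    )

Both forms parse to the same assignment; the parentheses around the right-hand side are purely a line-wrapping device, so the diff cannot change runtime behavior.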
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py
index 297fc26c4e..78efe15f0b 100644
--- a/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py
@@ -57,9 +57,7 @@
 from google.cloud.aiplatform_v1.types import custom_job
 from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
 from google.cloud.aiplatform_v1.types import data_labeling_job
-from google.cloud.aiplatform_v1.types import (
-    data_labeling_job as gca_data_labeling_job,
-)
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
 from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
 from google.cloud.aiplatform_v1.types import (
     hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
@@ -2857,11 +2855,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseCancelHyperparameterTuningJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_cancel_hyperparameter_tuning_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_cancel_hyperparameter_tuning_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseCancelHyperparameterTuningJob._get_transcoded_request(
                 http_options, request
@@ -3195,11 +3192,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_batch_prediction_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_batch_prediction_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_batch_prediction_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3535,11 +3531,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_data_labeling_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_data_labeling_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_data_labeling_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3634,11 +3629,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseCreateHyperparameterTuningJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_create_hyperparameter_tuning_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_create_hyperparameter_tuning_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseCreateHyperparameterTuningJob._get_transcoded_request(
                 http_options, request
@@ -3709,11 +3703,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_hyperparameter_tuning_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_hyperparameter_tuning_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_hyperparameter_tuning_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3813,11 +3806,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_create_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_create_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -3894,11 +3886,10 @@ async def __call__(
                 resp
             )
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_model_deployment_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_model_deployment_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4226,11 +4217,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_batch_prediction_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_batch_prediction_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_batch_prediction_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4547,11 +4537,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_data_labeling_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_data_labeling_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_data_labeling_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4642,11 +4631,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseDeleteHyperparameterTuningJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_delete_hyperparameter_tuning_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_delete_hyperparameter_tuning_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseDeleteHyperparameterTuningJob._get_transcoded_request(
                 http_options, request
@@ -4712,11 +4700,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_hyperparameter_tuning_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_hyperparameter_tuning_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_hyperparameter_tuning_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4809,11 +4796,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_delete_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_delete_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -4881,11 +4867,10 @@ async def __call__(
                 resp
             )
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_model_deployment_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_model_deployment_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -5208,11 +5193,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_batch_prediction_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_batch_prediction_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_batch_prediction_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -5633,11 +5617,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseGetHyperparameterTuningJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_get_hyperparameter_tuning_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_get_hyperparameter_tuning_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseGetHyperparameterTuningJob._get_transcoded_request(
                 http_options, request
@@ -5703,11 +5686,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_hyperparameter_tuning_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_hyperparameter_tuning_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_hyperparameter_tuning_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -5804,11 +5786,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_get_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_get_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -5878,11 +5859,10 @@ async def __call__(
                 resp
             )
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_model_deployment_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_model_deployment_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6361,11 +6341,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_batch_prediction_jobs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_batch_prediction_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_batch_prediction_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6684,11 +6663,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_data_labeling_jobs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_data_labeling_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_data_labeling_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6780,11 +6758,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseListHyperparameterTuningJobs._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_list_hyperparameter_tuning_jobs(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_list_hyperparameter_tuning_jobs(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseListHyperparameterTuningJobs._get_transcoded_request(
                 http_options, request
@@ -6850,11 +6827,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_hyperparameter_tuning_jobs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_hyperparameter_tuning_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_hyperparameter_tuning_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6950,11 +6926,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_list_model_deployment_monitoring_jobs(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_list_model_deployment_monitoring_jobs(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_transcoded_request(
                 http_options, request
@@ -7022,11 +6997,10 @@ async def __call__(
                 resp
             )
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_model_deployment_monitoring_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_model_deployment_monitoring_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -7437,11 +7411,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_pause_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_pause_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -7566,11 +7539,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_resume_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_resume_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -7701,11 +7673,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_transcoded_request(
                 http_options, request
@@ -7782,11 +7753,10 @@ async def __call__(
                 resp
             )
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_search_model_deployment_monitoring_stats_anomalies_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_search_model_deployment_monitoring_stats_anomalies_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -7882,11 +7852,10 @@ async def __call__(
                 _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_update_model_deployment_monitoring_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_update_model_deployment_monitoring_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_transcoded_request(
                 http_options, request
@@ -7959,11 +7928,10 @@ async def __call__(
                 resp
             )
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_update_model_deployment_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_update_model_deployment_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py
index a83ac9c96c..fe67301f48 100644
--- a/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py
@@ -34,9 +34,7 @@
 from google.cloud.aiplatform_v1.types import custom_job
 from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
 from google.cloud.aiplatform_v1.types import data_labeling_job
-from google.cloud.aiplatform_v1.types import (
-    data_labeling_job as gca_data_labeling_job,
-)
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
 from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
 from google.cloud.aiplatform_v1.types import (
     hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/client.py b/google/cloud/aiplatform_v1/services/llm_utility_service/client.py
index 86ad43747b..4c2d013421 100644
--- a/google/cloud/aiplatform_v1/services/llm_utility_service/client.py
+++ b/google/cloud/aiplatform_v1/services/llm_utility_service/client.py
@@ -647,11 +647,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = LlmUtilityServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            LlmUtilityServiceClient._read_environment_variables()
+        )
         self._client_cert_source = LlmUtilityServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1/services/match_service/client.py b/google/cloud/aiplatform_v1/services/match_service/client.py
index ed45e283d5..f55e7a8f0f 100644
--- a/google/cloud/aiplatform_v1/services/match_service/client.py
+++ b/google/cloud/aiplatform_v1/services/match_service/client.py
@@ -619,11 +619,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = MatchServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            MatchServiceClient._read_environment_variables()
+        )
         self._client_cert_source = MatchServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
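The client __init__ hunks for LlmUtilityServiceClient and MatchServiceClient above (and the other *Client classes below) get the same treatment for the three-way unpack of _read_environment_variables(). A self-contained sketch of the pattern; the helper body here is a simplified assumption, not the generated implementation, though the environment variable names match the ones these clients read:

    import os

    def _read_environment_variables() -> tuple:
        # Simplified stand-in: the generated helper also validates these values.
        return (
            os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"),
            os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto"),
            os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN", ""),
        )

    # Reformatted style: one-line target list, call wrapped in parentheses so the
    # statement stays within the line limit.
    use_client_cert, use_mtls_endpoint, universe_domain_env = (
        _read_environment_variables()
    )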
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py
index 5993b4a989..df01b930c4 100644
--- a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py
@@ -58,14 +58,10 @@
 from google.cloud.aiplatform_v1.types import execution as gca_execution
 from google.cloud.aiplatform_v1.types import lineage_subgraph
 from google.cloud.aiplatform_v1.types import metadata_schema
-from google.cloud.aiplatform_v1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1.types import metadata_service
 from google.cloud.aiplatform_v1.types import metadata_store
-from google.cloud.aiplatform_v1.types import (
-    metadata_store as gca_metadata_store,
-)
+from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store
 from google.cloud.aiplatform_v1.types import operation as gca_operation
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/client.py b/google/cloud/aiplatform_v1/services/metadata_service/client.py
index 28b77fe7df..e374d2abb9 100644
--- a/google/cloud/aiplatform_v1/services/metadata_service/client.py
+++ b/google/cloud/aiplatform_v1/services/metadata_service/client.py
@@ -74,14 +74,10 @@
 from google.cloud.aiplatform_v1.types import execution as gca_execution
 from google.cloud.aiplatform_v1.types import lineage_subgraph
 from google.cloud.aiplatform_v1.types import metadata_schema
-from google.cloud.aiplatform_v1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1.types import metadata_service
 from google.cloud.aiplatform_v1.types import metadata_store
-from google.cloud.aiplatform_v1.types import (
-    metadata_store as gca_metadata_store,
-)
+from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store
 from google.cloud.aiplatform_v1.types import operation as gca_operation
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
@@ -741,11 +737,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = MetadataServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            MetadataServiceClient._read_environment_variables()
+        )
         self._client_cert_source = MetadataServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py
index 885c9b36ae..8da916c27e 100644
--- a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py
+++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py
@@ -36,9 +36,7 @@
 from google.cloud.aiplatform_v1.types import execution as gca_execution
 from google.cloud.aiplatform_v1.types import lineage_subgraph
 from google.cloud.aiplatform_v1.types import metadata_schema
-from google.cloud.aiplatform_v1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1.types import metadata_service
 from google.cloud.aiplatform_v1.types import metadata_store
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py
index 897765ec9b..486da4b988 100644
--- a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py
@@ -39,9 +39,7 @@
 from google.cloud.aiplatform_v1.types import execution as gca_execution
 from google.cloud.aiplatform_v1.types import lineage_subgraph
 from google.cloud.aiplatform_v1.types import metadata_schema
-from google.cloud.aiplatform_v1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1.types import metadata_service
 from google.cloud.aiplatform_v1.types import metadata_store
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py
index 9929403b3a..f319a6a00d 100644
--- a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py
@@ -42,9 +42,7 @@
 from google.cloud.aiplatform_v1.types import execution as gca_execution
 from google.cloud.aiplatform_v1.types import lineage_subgraph
 from google.cloud.aiplatform_v1.types import metadata_schema
-from google.cloud.aiplatform_v1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1.types import metadata_service
 from google.cloud.aiplatform_v1.types import metadata_store
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py
index 9b4fff920b..d048bac303 100644
--- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py
@@ -45,9 +45,7 @@
 from google.cloud.aiplatform_v1.types import execution as gca_execution
 from google.cloud.aiplatform_v1.types import lineage_subgraph
 from google.cloud.aiplatform_v1.types import metadata_schema
-from google.cloud.aiplatform_v1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1.types import metadata_service
 from google.cloud.aiplatform_v1.types import metadata_store
 from google.longrunning import operations_pb2  # type: ignore
@@ -4279,11 +4277,10 @@ def __call__(
                 _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_add_context_artifacts_and_executions(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_add_context_artifacts_and_executions(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_transcoded_request(
                 http_options, request
@@ -4349,11 +4346,10 @@ def __call__(
             resp = self._interceptor.post_add_context_artifacts_and_executions(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_add_context_artifacts_and_executions_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_add_context_artifacts_and_executions_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -8144,11 +8140,10 @@ def __call__(
             resp = self._interceptor.post_query_artifact_lineage_subgraph(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_query_artifact_lineage_subgraph_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_query_artifact_lineage_subgraph_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -8304,11 +8299,10 @@ def __call__(
             resp = self._interceptor.post_query_context_lineage_subgraph(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_query_context_lineage_subgraph_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_query_context_lineage_subgraph_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -8400,11 +8394,10 @@ def __call__(
                 _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_query_execution_inputs_and_outputs(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_query_execution_inputs_and_outputs(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_transcoded_request(
                 http_options, request
@@ -8465,11 +8458,10 @@ def __call__(
             resp = self._interceptor.post_query_execution_inputs_and_outputs(resp)
 
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_query_execution_inputs_and_outputs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_query_execution_inputs_and_outputs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py
index dab211831b..7c84da6537 100644
--- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py
@@ -58,9 +58,7 @@
 from google.cloud.aiplatform_v1.types import execution as gca_execution
 from google.cloud.aiplatform_v1.types import lineage_subgraph
 from google.cloud.aiplatform_v1.types import metadata_schema
-from google.cloud.aiplatform_v1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1.types import metadata_service
 from google.cloud.aiplatform_v1.types import metadata_store
 from google.longrunning import operations_pb2  # type: ignore
@@ -2551,11 +2549,10 @@ async def __call__(
                 _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_add_context_artifacts_and_executions(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_add_context_artifacts_and_executions(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_transcoded_request(
                 http_options, request
@@ -2628,11 +2625,10 @@ async def __call__(
                 resp
             )
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_add_context_artifacts_and_executions_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_add_context_artifacts_and_executions_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6594,11 +6590,10 @@ async def __call__(
                 _BaseMetadataServiceRestTransport._BaseQueryArtifactLineageSubgraph._get_http_options()
            )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_query_artifact_lineage_subgraph(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_query_artifact_lineage_subgraph(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseQueryArtifactLineageSubgraph._get_transcoded_request(
                 http_options, request
@@ -6664,11 +6659,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_query_artifact_lineage_subgraph(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_query_artifact_lineage_subgraph_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_query_artifact_lineage_subgraph_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6761,11 +6755,10 @@ async def __call__(
                 _BaseMetadataServiceRestTransport._BaseQueryContextLineageSubgraph._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_query_context_lineage_subgraph(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_query_context_lineage_subgraph(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseQueryContextLineageSubgraph._get_transcoded_request(
                 http_options, request
@@ -6831,11 +6824,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_query_context_lineage_subgraph(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_query_context_lineage_subgraph_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_query_context_lineage_subgraph_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6930,11 +6922,10 @@ async def __call__(
                 _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_query_execution_inputs_and_outputs(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_query_execution_inputs_and_outputs(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_transcoded_request(
                 http_options, request
@@ -7000,11 +6991,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_query_execution_inputs_and_outputs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_query_execution_inputs_and_outputs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_query_execution_inputs_and_outputs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -7168,11 +7158,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_remove_context_children(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_remove_context_children_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_remove_context_children_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py
index 58a0d06380..c796c585c5 100644
--- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py
@@ -35,9 +35,7 @@
 from google.cloud.aiplatform_v1.types import execution as gca_execution
 from google.cloud.aiplatform_v1.types import lineage_subgraph
 from google.cloud.aiplatform_v1.types import metadata_schema
-from google.cloud.aiplatform_v1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1.types import metadata_service
 from google.cloud.aiplatform_v1.types import metadata_store
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py
index 1fe8e002d9..3712ee16f8 100644
--- a/google/cloud/aiplatform_v1/services/migration_service/client.py
+++ b/google/cloud/aiplatform_v1/services/migration_service/client.py
@@ -753,11 +753,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = MigrationServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            MigrationServiceClient._read_environment_variables()
+        )
         self._client_cert_source = MigrationServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py
index 8e75ad9035..998dba0ab2 100644
--- a/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py
@@ -743,11 +743,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_batch_migrate_resources(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_batch_migrate_resources_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_batch_migrate_resources_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -910,11 +909,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_search_migratable_resources(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_search_migratable_resources_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_search_migratable_resources_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/client.py b/google/cloud/aiplatform_v1/services/model_garden_service/client.py
index 40a77d4418..5630b72eba 100644
--- a/google/cloud/aiplatform_v1/services/model_garden_service/client.py
+++ b/google/cloud/aiplatform_v1/services/model_garden_service/client.py
@@ -687,11 +687,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = ModelGardenServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            ModelGardenServiceClient._read_environment_variables()
+        )
         self._client_cert_source = ModelGardenServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py
index 69d611ec0e..ff619028f0 100644
--- a/google/cloud/aiplatform_v1/services/model_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py
@@ -55,9 +55,7 @@
 from google.cloud.aiplatform_v1.types import model
 from google.cloud.aiplatform_v1.types import model as gca_model
 from google.cloud.aiplatform_v1.types import model_evaluation
-from google.cloud.aiplatform_v1.types import (
-    model_evaluation as gca_model_evaluation,
-)
+from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation
 from google.cloud.aiplatform_v1.types import model_evaluation_slice
 from google.cloud.aiplatform_v1.types import model_service
 from google.cloud.aiplatform_v1.types import operation as gca_operation
diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py
index 8cd2a5ca5b..0243d9160b 100644
--- a/google/cloud/aiplatform_v1/services/model_service/client.py
+++ b/google/cloud/aiplatform_v1/services/model_service/client.py
@@ -71,9 +71,7 @@
 from google.cloud.aiplatform_v1.types import model
 from google.cloud.aiplatform_v1.types import model as gca_model
 from google.cloud.aiplatform_v1.types import model_evaluation
-from google.cloud.aiplatform_v1.types import (
-    model_evaluation as gca_model_evaluation,
-)
+from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation
 from google.cloud.aiplatform_v1.types import model_evaluation_slice
 from google.cloud.aiplatform_v1.types import model_service
 from google.cloud.aiplatform_v1.types import operation as gca_operation
@@ -751,11 +749,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = ModelServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            ModelServiceClient._read_environment_variables()
+        )
         self._client_cert_source = ModelServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_service/transports/base.py
index 77fd300f0a..102f5e8a31 100644
--- a/google/cloud/aiplatform_v1/services/model_service/transports/base.py
+++ b/google/cloud/aiplatform_v1/services/model_service/transports/base.py
@@ -31,9 +31,7 @@
 from google.cloud.aiplatform_v1.types import model
 from google.cloud.aiplatform_v1.types import model as gca_model
 from google.cloud.aiplatform_v1.types import model_evaluation
-from google.cloud.aiplatform_v1.types import (
-    model_evaluation as gca_model_evaluation,
-)
+from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation
 from google.cloud.aiplatform_v1.types import model_evaluation_slice
 from google.cloud.aiplatform_v1.types import model_service
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py
index 6d64cfa1bc..3135d3e4ef 100644
--- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py
@@ -34,9 +34,7 @@
 from google.cloud.aiplatform_v1.types import model
 from google.cloud.aiplatform_v1.types import model as gca_model
 from google.cloud.aiplatform_v1.types import model_evaluation
-from google.cloud.aiplatform_v1.types import (
-    model_evaluation as gca_model_evaluation,
-)
+from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation
 from google.cloud.aiplatform_v1.types import model_evaluation_slice
 from google.cloud.aiplatform_v1.types import model_service
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py
index b9415e9bff..f1e7467855 100644
--- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py
@@ -37,9 +37,7 @@
 from google.cloud.aiplatform_v1.types import model
 from google.cloud.aiplatform_v1.types import model as gca_model
 from google.cloud.aiplatform_v1.types import model_evaluation
-from google.cloud.aiplatform_v1.types import (
-    model_evaluation as gca_model_evaluation,
-)
+from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation
 from google.cloud.aiplatform_v1.types import model_evaluation_slice
 from google.cloud.aiplatform_v1.types import model_service
 from google.cloud.location import locations_pb2  # type: ignore
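A recurring context line in the *_with_metadata hunks is the one that snapshots response headers before handing them to the interceptor hook. A small sketch of what that comprehension produces; the headers mapping here is a stand-in for the real HTTP response object:

    headers = {"content-type": "application/json", "x-goog-request-id": 12345}

    # Each header value is coerced to str, yielding a list of (key, value)
    # pairs in the shape the *_with_metadata interceptor hooks expect.
    response_metadata = [(k, str(v)) for k, v in headers.items()]
    assert response_metadata == [
        ("content-type", "application/json"),
        ("x-goog-request-id", "12345"),
    ]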
a/google/cloud/aiplatform_v1/services/model_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest.py @@ -40,9 +40,7 @@ from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model as gca_model from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import ( - model_evaluation as gca_model_evaluation, -) +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import model_service from google.longrunning import operations_pb2 # type: ignore @@ -3531,11 +3529,10 @@ def __call__( _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_batch_import_evaluated_annotations( - request, metadata + request, metadata = ( + self._interceptor.pre_batch_import_evaluated_annotations( + request, metadata + ) ) transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_transcoded_request( http_options, request @@ -3601,11 +3598,10 @@ def __call__( resp = self._interceptor.post_batch_import_evaluated_annotations(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_batch_import_evaluated_annotations_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_batch_import_evaluated_annotations_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3699,11 +3695,10 @@ def __call__( _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_batch_import_model_evaluation_slices( - request, metadata + request, metadata = ( + self._interceptor.pre_batch_import_model_evaluation_slices( + request, metadata + ) ) transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_transcoded_request( http_options, request @@ -3769,11 +3764,10 @@ def __call__( resp = self._interceptor.post_batch_import_model_evaluation_slices(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_batch_import_model_evaluation_slices_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_batch_import_model_evaluation_slices_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5628,11 +5622,10 @@ def __call__( resp = self._interceptor.post_list_model_version_checkpoints(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_list_model_version_checkpoints_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_list_model_version_checkpoints_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py index 31cec31efc..64604b4df7 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py @@ -53,9 +53,7 @@ from 
google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model as gca_model from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import ( - model_evaluation as gca_model_evaluation, -) +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import model_service from google.longrunning import operations_pb2 # type: ignore @@ -1732,11 +1730,10 @@ async def __call__( _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_batch_import_evaluated_annotations( - request, metadata + request, metadata = ( + await self._interceptor.pre_batch_import_evaluated_annotations( + request, metadata + ) ) transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_transcoded_request( http_options, request @@ -1807,11 +1804,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_import_evaluated_annotations(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_import_evaluated_annotations_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_import_evaluated_annotations_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1908,11 +1904,10 @@ async def __call__( _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_batch_import_model_evaluation_slices( - request, metadata + request, metadata = ( + await self._interceptor.pre_batch_import_model_evaluation_slices( + request, metadata + ) ) transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_transcoded_request( http_options, request @@ -1985,11 +1980,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_import_model_evaluation_slices_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_import_model_evaluation_slices_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3128,11 +3122,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_model_evaluation_slice(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_model_evaluation_slice_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_get_model_evaluation_slice_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3298,11 +3291,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_import_model_evaluation(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_import_model_evaluation_with_metadata( - resp, response_metadata + resp, _ = ( + await 
diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py
index ad72576deb..e6afd8dc1a 100644
--- a/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py
@@ -30,9 +30,7 @@
 from google.cloud.aiplatform_v1.types import model
 from google.cloud.aiplatform_v1.types import model as gca_model
 from google.cloud.aiplatform_v1.types import model_evaluation
-from google.cloud.aiplatform_v1.types import (
-    model_evaluation as gca_model_evaluation,
-)
+from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation
 from google.cloud.aiplatform_v1.types import model_evaluation_slice
 from google.cloud.aiplatform_v1.types import model_service
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py
index fd5bf24485..270a0fef05 100644
--- a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py
@@ -59,9 +57,7 @@
 )
 from google.cloud.aiplatform_v1.types import notebook_idle_shutdown_config
 from google.cloud.aiplatform_v1.types import notebook_runtime
-from google.cloud.aiplatform_v1.types import (
-    notebook_runtime as gca_notebook_runtime,
-)
+from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime
 from google.cloud.aiplatform_v1.types import notebook_runtime_template_ref
 from google.cloud.aiplatform_v1.types import notebook_service
 from google.cloud.aiplatform_v1.types import notebook_software_config
@@ -954,7 +952,7 @@ async def sample_update_notebook_runtime_template():
                 Input format: ``{paths: "${updated_filed}"}``
 
                 Updatable fields:
-                -  ``encryption_spec.kms_key_name``
+                - ``encryption_spec.kms_key_name``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1/services/notebook_service/client.py b/google/cloud/aiplatform_v1/services/notebook_service/client.py
index 873b6d193a..ff3f57c84e 100644
--- a/google/cloud/aiplatform_v1/services/notebook_service/client.py
+++ b/google/cloud/aiplatform_v1/services/notebook_service/client.py
@@ -75,9 +73,7 @@
 )
 from google.cloud.aiplatform_v1.types import notebook_idle_shutdown_config
 from google.cloud.aiplatform_v1.types import notebook_runtime
-from google.cloud.aiplatform_v1.types import (
-    notebook_runtime as gca_notebook_runtime,
-)
+from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime
 from google.cloud.aiplatform_v1.types import notebook_runtime_template_ref
 from google.cloud.aiplatform_v1.types import notebook_service
 from google.cloud.aiplatform_v1.types import notebook_software_config
@@ -776,11 +774,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = NotebookServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            NotebookServiceClient._read_environment_variables()
+        )
         self._client_cert_source = NotebookServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
@@ -1520,7 +1516,7 @@ def sample_update_notebook_runtime_template():
                 Input format: ``{paths: "${updated_filed}"}``
 
                 Updatable fields:
-                -  ``encryption_spec.kms_key_name``
+                - ``encryption_spec.kms_key_name``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
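The ``update_mask`` docstring change above is purely cosmetic (bullet indentation); the field works as before. A hedged sketch of the documented usage, with placeholder resource names:

    from google.cloud import aiplatform_v1
    from google.protobuf import field_mask_pb2

    client = aiplatform_v1.NotebookServiceClient()
    template = aiplatform_v1.NotebookRuntimeTemplate(
        name="projects/my-project/locations/us-central1/notebookRuntimeTemplates/my-template",
        encryption_spec=aiplatform_v1.EncryptionSpec(
            kms_key_name="projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"
        ),
    )
    # Only the single updatable field named in the docstring goes in the mask.
    updated = client.update_notebook_runtime_template(
        notebook_runtime_template=template,
        update_mask=field_mask_pb2.FieldMask(paths=["encryption_spec.kms_key_name"]),
    )
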
diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py
index 510172ece9..9209bf23b6 100644
--- a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py
@@ -3600,11 +3600,10 @@ def __call__(
             resp = self._interceptor.post_create_notebook_execution_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_create_notebook_execution_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_create_notebook_execution_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3760,11 +3759,10 @@ def __call__(
             resp = self._interceptor.post_create_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_create_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_create_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3916,11 +3914,10 @@ def __call__(
             resp = self._interceptor.post_delete_notebook_execution_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_delete_notebook_execution_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_delete_notebook_execution_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4222,11 +4219,10 @@ def __call__(
             resp = self._interceptor.post_delete_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_delete_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_delete_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4696,11 +4692,10 @@ def __call__(
             resp = self._interceptor.post_get_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_get_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_get_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -5164,11 +5159,10 @@ def __call__(
             resp = self._interceptor.post_list_notebook_runtime_templates(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_list_notebook_runtime_templates_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_list_notebook_runtime_templates_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -5644,11 +5638,10 @@ def __call__(
             resp = self._interceptor.post_update_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_update_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_update_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py
index 1020e8ab06..9b81e30eaf 100644
--- a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py
@@ -1630,11 +1630,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_assign_notebook_runtime(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_assign_notebook_runtime_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_assign_notebook_runtime_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1726,11 +1725,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseCreateNotebookExecutionJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_create_notebook_execution_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_create_notebook_execution_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseCreateNotebookExecutionJob._get_transcoded_request(
                 http_options, request
@@ -1801,11 +1799,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_notebook_execution_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_notebook_execution_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_notebook_execution_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1899,11 +1896,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseCreateNotebookRuntimeTemplate._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_create_notebook_runtime_template(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_create_notebook_runtime_template(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseCreateNotebookRuntimeTemplate._get_transcoded_request(
                 http_options, request
@@ -1974,11 +1970,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2069,11 +2064,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseDeleteNotebookExecutionJob._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_delete_notebook_execution_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_delete_notebook_execution_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseDeleteNotebookExecutionJob._get_transcoded_request(
                 http_options, request
@@ -2139,11 +2133,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_notebook_execution_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_notebook_execution_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_notebook_execution_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2300,11 +2293,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_notebook_runtime(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_notebook_runtime_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_notebook_runtime_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2397,11 +2389,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseDeleteNotebookRuntimeTemplate._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_delete_notebook_runtime_template(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_delete_notebook_runtime_template(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseDeleteNotebookRuntimeTemplate._get_transcoded_request(
                 http_options, request
@@ -2467,11 +2458,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2628,11 +2618,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_notebook_execution_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_notebook_execution_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_notebook_execution_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2891,11 +2880,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseGetNotebookRuntimeTemplate._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_get_notebook_runtime_template(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_get_notebook_runtime_template(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseGetNotebookRuntimeTemplate._get_transcoded_request(
                 http_options, request
@@ -2961,11 +2949,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3057,11 +3044,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseListNotebookExecutionJobs._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_list_notebook_execution_jobs(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_list_notebook_execution_jobs(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseListNotebookExecutionJobs._get_transcoded_request(
                 http_options, request
@@ -3127,11 +3113,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_notebook_execution_jobs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_notebook_execution_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_notebook_execution_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3386,11 +3371,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseListNotebookRuntimeTemplates._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_list_notebook_runtime_templates(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_list_notebook_runtime_templates(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseListNotebookRuntimeTemplates._get_transcoded_request(
                 http_options, request
@@ -3456,11 +3440,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_notebook_runtime_templates(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_notebook_runtime_templates_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_notebook_runtime_templates_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3888,11 +3871,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseUpdateNotebookRuntimeTemplate._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_update_notebook_runtime_template(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_update_notebook_runtime_template(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseUpdateNotebookRuntimeTemplate._get_transcoded_request(
                 http_options, request
@@ -3963,11 +3945,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_update_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_update_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_update_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4132,11 +4113,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_upgrade_notebook_runtime(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_upgrade_notebook_runtime_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_upgrade_notebook_runtime_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py
index 2e4f86bc01..bb0ea15a2f 100644
--- a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py
@@ -47,9 +47,7 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1.services.persistent_resource_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1.services.persistent_resource_service import pagers
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import operation as gca_operation
 from google.cloud.aiplatform_v1.types import persistent_resource
diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py
index e39f369669..d41cd14a2f 100644
--- a/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py
+++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py
@@ -63,9 +63,7 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1.services.persistent_resource_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1.services.persistent_resource_service import pagers
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import operation as gca_operation
 from google.cloud.aiplatform_v1.types import persistent_resource
@@ -707,11 +705,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = PersistentResourceServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            PersistentResourceServiceClient._read_environment_variables()
+        )
         self._client_cert_source = (
             PersistentResourceServiceClient._get_client_cert_source(
                 self._client_options.client_cert_source, self._use_client_cert
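The ``_read_environment_variables()`` unpacking changed in these ``__init__`` hunks is again a pure formatting change. For reference, the three values it returns are driven by environment variables with these names in the generated GAPIC clients (a sketch; the values shown are the documented defaults):

    import os

    # -> self._use_client_cert: whether to use a client certificate for mTLS.
    os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "false"
    # -> self._use_mtls_endpoint: "always", "never", or "auto".
    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "auto"
    # -> self._universe_domain_env: target universe domain.
    os.environ["GOOGLE_CLOUD_UNIVERSE_DOMAIN"] = "googleapis.com"
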
diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py
index e303e2be91..03627cfd7e 100644
--- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py
@@ -1001,11 +1001,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_persistent_resource(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_persistent_resource_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_persistent_resource_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1165,11 +1164,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_persistent_resource(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_persistent_resource_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_persistent_resource_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1330,11 +1328,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_persistent_resource(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_persistent_resource_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_persistent_resource_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1496,11 +1493,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_persistent_resources(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_persistent_resources_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_persistent_resources_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1668,11 +1664,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_reboot_persistent_resource(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_reboot_persistent_resource_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_reboot_persistent_resource_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1838,11 +1833,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_update_persistent_resource(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_update_persistent_resource_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_update_persistent_resource_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
index fd930acb7f..48bc855dc4 100644
--- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
@@ -57,9 +57,7 @@
 from google.cloud.aiplatform_v1.types import pipeline_state
 from google.cloud.aiplatform_v1.types import service_networking
 from google.cloud.aiplatform_v1.types import training_pipeline
-from google.cloud.aiplatform_v1.types import (
-    training_pipeline as gca_training_pipeline,
-)
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py
index a7525eb937..cc05c1721c 100644
--- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py
@@ -73,9 +71,7 @@
 from google.cloud.aiplatform_v1.types import pipeline_state
 from google.cloud.aiplatform_v1.types import service_networking
 from google.cloud.aiplatform_v1.types import training_pipeline
-from google.cloud.aiplatform_v1.types import (
-    training_pipeline as gca_training_pipeline,
-)
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
@@ -843,11 +841,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = PipelineServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            PipelineServiceClient._read_environment_variables()
+        )
         self._client_cert_source = PipelineServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py
index 5c9e73e6e1..cbf70be45d 100644
--- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py
@@ -32,9 +32,7 @@
 from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
 from google.cloud.aiplatform_v1.types import pipeline_service
 from google.cloud.aiplatform_v1.types import training_pipeline
-from google.cloud.aiplatform_v1.types import (
-    training_pipeline as gca_training_pipeline,
-)
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py
index 9f13d254dc..5888e0ba71 100644
--- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py
@@ -35,9 +35,7 @@
 from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
 from google.cloud.aiplatform_v1.types import pipeline_service
 from google.cloud.aiplatform_v1.types import training_pipeline
-from google.cloud.aiplatform_v1.types import (
-    training_pipeline as gca_training_pipeline,
-)
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py
index ac1b45abc5..a91ae454f8 100644
--- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py
@@ -38,9 +38,7 @@
 from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
 from google.cloud.aiplatform_v1.types import pipeline_service
 from google.cloud.aiplatform_v1.types import training_pipeline
-from google.cloud.aiplatform_v1.types import (
-    training_pipeline as gca_training_pipeline,
-)
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py
index dd8158adb1..4bda3b6bd8 100644
--- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py
@@ -41,9 +41,7 @@
 from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
 from google.cloud.aiplatform_v1.types import pipeline_service
 from google.cloud.aiplatform_v1.types import training_pipeline
-from google.cloud.aiplatform_v1.types import (
-    training_pipeline as gca_training_pipeline,
-)
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
 from google.protobuf import empty_pb2  # type: ignore
 from google.longrunning import operations_pb2  # type: ignore
 
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py
index cc86368505..aef88c7cff 100644
--- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py
@@ -54,9 +54,7 @@
 from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
 from google.cloud.aiplatform_v1.types import pipeline_service
 from google.cloud.aiplatform_v1.types import training_pipeline
-from google.cloud.aiplatform_v1.types import (
-    training_pipeline as gca_training_pipeline,
-)
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
 from google.protobuf import empty_pb2  # type: ignore
 from google.longrunning import operations_pb2  # type: ignore
 
@@ -1303,11 +1301,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_batch_cancel_pipeline_jobs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_batch_cancel_pipeline_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_batch_cancel_pipeline_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1471,11 +1468,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_batch_delete_pipeline_jobs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_batch_delete_pipeline_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_batch_delete_pipeline_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2050,11 +2046,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_training_pipeline(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_training_pipeline_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_training_pipeline_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2371,11 +2366,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_training_pipeline(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_training_pipeline_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_training_pipeline_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3014,11 +3008,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_training_pipelines(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_training_pipelines_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_training_pipelines_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py
index a8fdc60846..5c9d04114e 100644
--- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py
@@ -31,9 +31,7 @@
 from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
 from google.cloud.aiplatform_v1.types import pipeline_service
 from google.cloud.aiplatform_v1.types import training_pipeline
-from google.cloud.aiplatform_v1.types import (
-    training_pipeline as gca_training_pipeline,
-)
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
 from google.protobuf import empty_pb2  # type: ignore
 from google.longrunning import operations_pb2  # type: ignore
 
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py
index 297dcc3fa2..d2c4e46e19 100644
--- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py
@@ -477,13 +477,13 @@ async def raw_predict(
 
                 The response includes the following HTTP headers:
 
-                -  ``X-Vertex-AI-Endpoint-Id``: ID of the
-                   [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served
-                   this prediction.
+                - ``X-Vertex-AI-Endpoint-Id``: ID of the
+                  [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served
+                  this prediction.
 
-                -  ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's
-                   [DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
-                   that served this prediction.
+                - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's
+                  [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that
+                  served this prediction.
 
                 .. code-block:: python
 
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py
index 22e1023edb..71ae364277 100644
--- a/google/cloud/aiplatform_v1/services/prediction_service/client.py
+++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py
@@ -719,11 +719,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = PredictionServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            PredictionServiceClient._read_environment_variables()
+        )
         self._client_cert_source = PredictionServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
@@ -1017,13 +1015,13 @@ def raw_predict(
 
                 The response includes the following HTTP headers:
 
-                -  ``X-Vertex-AI-Endpoint-Id``: ID of the
-                   [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served
-                   this prediction.
+                - ``X-Vertex-AI-Endpoint-Id``: ID of the
+                  [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served
+                  this prediction.
 
-                -  ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's
-                   [DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
-                   that served this prediction.
+                - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's
+                  [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that
+                  served this prediction.
 
                 .. code-block:: python
 
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py
index 6ceddb1124..4b69ad44b1 100644
--- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py
@@ -363,13 +363,13 @@ def raw_predict(
 
         The response includes the following HTTP headers:
 
-        -  ``X-Vertex-AI-Endpoint-Id``: ID of the
-           [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served
-           this prediction.
+        - ``X-Vertex-AI-Endpoint-Id``: ID of the
+          [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served
+          this prediction.
 
-        -  ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's
-           [DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
-           that served this prediction.
+        - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's
+          [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that
+          served this prediction.
 
         Returns:
             Callable[[~.RawPredictRequest],
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py
index 7ff11e8f8b..e48d7ff74b 100644
--- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py
@@ -374,13 +374,13 @@ def raw_predict(
 
         The response includes the following HTTP headers:
 
-        -  ``X-Vertex-AI-Endpoint-Id``: ID of the
-           [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served
-           this prediction.
+        - ``X-Vertex-AI-Endpoint-Id``: ID of the
+          [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served
+          this prediction.
 
-        -  ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's
-           [DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
-           that served this prediction.
+        - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's
+          [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that
+          served this prediction.
 
         Returns:
             Callable[[~.RawPredictRequest],
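The re-wrapped ``raw_predict`` docstrings above describe two response headers. A sketch of observing them by calling the REST ``:rawPredict`` method directly (project, region, endpoint ID, and payload are placeholders):

    import google.auth
    from google.auth.transport.requests import AuthorizedSession

    credentials, _ = google.auth.default()
    session = AuthorizedSession(credentials)
    url = (
        "https://us-central1-aiplatform.googleapis.com/v1/projects/my-project"
        "/locations/us-central1/endpoints/1234567890:rawPredict"
    )
    response = session.post(url, json={"instances": [[1.0, 2.0]]})
    # Which Endpoint and DeployedModel actually served the request:
    print(response.headers.get("X-Vertex-AI-Endpoint-Id"))
    print(response.headers.get("X-Vertex-AI-Deployed-Model-Id"))
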
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py
index 0a5588e7bf..a59e353adb 100644
--- a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py
@@ -2083,6 +2083,22 @@ def __call__(
             resp, _ = self._interceptor.post_server_streaming_predict_with_metadata(
                 resp, response_metadata
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1.PredictionServiceClient.server_streaming_predict",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1.PredictionService",
+                        "rpcName": "ServerStreamingPredict",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
             return resp
 
     class _StreamDirectPredict(
@@ -2258,6 +2274,22 @@ def __call__(
             resp, _ = self._interceptor.post_stream_generate_content_with_metadata(
                 resp, response_metadata
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1.PredictionServiceClient.stream_generate_content",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1.PredictionService",
+                        "rpcName": "StreamGenerateContent",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
             return resp
 
     class _StreamingPredict(
@@ -2477,6 +2509,22 @@ def __call__(
             resp, _ = self._interceptor.post_stream_raw_predict_with_metadata(
                 resp, response_metadata
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1.PredictionServiceClient.stream_raw_predict",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1.PredictionService",
+                        "rpcName": "StreamRawPredict",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
             return resp
 
     @property
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_asyncio.py
index 3429eeaaf6..ae8716306a 100644
--- a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest_asyncio.py
@@ -2250,12 +2250,28 @@ async def __call__(
             )
             resp = await self._interceptor.post_server_streaming_predict(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_server_streaming_predict_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_server_streaming_predict_with_metadata(
+                    resp, response_metadata
+                )
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": "OK",  # need to obtain this properly
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1.PredictionServiceAsyncClient.server_streaming_predict",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1.PredictionService",
+                        "rpcName": "ServerStreamingPredict",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
 
     class _StreamDirectPredict(
@@ -2430,12 +2446,28 @@ async def __call__(
             )
             resp = await self._interceptor.post_stream_generate_content(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_stream_generate_content_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_stream_generate_content_with_metadata(
+                    resp, response_metadata
+                )
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": "OK",  # need to obtain this properly
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1.PredictionServiceAsyncClient.stream_generate_content",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1.PredictionService",
+                        "rpcName": "StreamGenerateContent",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
 
     class _StreamingPredict(
@@ -2661,6 +2693,23 @@ async def __call__(
             resp, _ = await self._interceptor.post_stream_raw_predict_with_metadata(
                 resp, response_metadata
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": "OK",  # need to obtain this properly
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1.PredictionServiceAsyncClient.stream_raw_predict",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1.PredictionService",
+                        "rpcName": "StreamRawPredict",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
 
     @property
diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/async_client.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/async_client.py
index c729b13871..4d36c2d70d 100644
--- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/async_client.py
@@ -48,9 +48,7 @@
 OptionalRetry = Union[retries.AsyncRetry, object, None]  # type: ignore
 
 from google.api import httpbody_pb2  # type: ignore
-from google.cloud.aiplatform_v1.types import (
-    reasoning_engine_execution_service,
-)
+from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/client.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/client.py
index aef703bc91..2c45691b2c 100644
--- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/client.py
+++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/client.py
@@ -63,9 +63,7 @@
 _LOGGER = std_logging.getLogger(__name__)
 
 from google.api import httpbody_pb2  # type: ignore
-from google.cloud.aiplatform_v1.types import (
-    reasoning_engine_execution_service,
-)
+from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
@@ -641,11 +639,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = ReasoningEngineExecutionServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            ReasoningEngineExecutionServiceClient._read_environment_variables()
+        )
         self._client_cert_source = (
             ReasoningEngineExecutionServiceClient._get_client_cert_source(
                 self._client_options.client_cert_source, self._use_client_cert
diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/base.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/base.py
index 578b21c99d..7edc62cc18 100644
--- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/base.py
+++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/base.py
@@ -28,9 +28,7 @@
 import google.protobuf
 
 from google.api import httpbody_pb2  # type: ignore
-from google.cloud.aiplatform_v1.types import (
-    reasoning_engine_execution_service,
-)
+from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc.py
index f0d02124a8..3e318d2f63 100644
--- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc.py
@@ -31,9 +31,7 @@
 import proto  # type: ignore
 
 from google.api import httpbody_pb2  # type: ignore
-from google.cloud.aiplatform_v1.types import (
-    reasoning_engine_execution_service,
-)
+from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py
index 4c57fe4818..64a9d7611a 100644
--- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py
@@ -34,9 +34,7 @@
 from grpc.experimental import aio  # type: ignore
 
 from google.api import httpbody_pb2  # type: ignore
-from google.cloud.aiplatform_v1.types import (
-    reasoning_engine_execution_service,
-)
+from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest.py
index 21c05eaa2e..93d3a839b4 100644
--- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest.py
@@ -37,9 +37,7 @@
 
 from google.api import httpbody_pb2  # type: ignore
-from google.cloud.aiplatform_v1.types import (
-    reasoning_engine_execution_service,
-)
+from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service
 from google.longrunning import operations_pb2  # type: ignore
 
@@ -886,12 +884,27 @@ def __call__(
             resp = self._interceptor.post_stream_query_reasoning_engine(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_stream_query_reasoning_engine_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_stream_query_reasoning_engine_with_metadata(
+                    resp, response_metadata
+                )
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1.ReasoningEngineExecutionServiceClient.stream_query_reasoning_engine",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1.ReasoningEngineExecutionService",
+                        "rpcName": "StreamQueryReasoningEngine",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
             return resp
 
     @property
diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_asyncio.py
index 7df31e2706..f488bb6e2e 100644
--- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_asyncio.py
@@ -49,9 +49,7 @@
 
 from google.api import httpbody_pb2  # type: ignore
-from google.cloud.aiplatform_v1.types import (
-    reasoning_engine_execution_service,
-)
+from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service
 from google.longrunning import operations_pb2  # type: ignore
 
@@ -895,11 +893,10 @@ async def __call__(
                 _BaseReasoningEngineExecutionServiceRestTransport._BaseStreamQueryReasoningEngine._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_stream_query_reasoning_engine(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_stream_query_reasoning_engine(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseReasoningEngineExecutionServiceRestTransport._BaseStreamQueryReasoningEngine._get_transcoded_request(
                 http_options, request
@@ -969,12 +966,28 @@ async def __call__(
             )
             resp = await self._interceptor.post_stream_query_reasoning_engine(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_stream_query_reasoning_engine_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_stream_query_reasoning_engine_with_metadata(
+                    resp, response_metadata
+                )
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": "OK",  # need to obtain this properly
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1.ReasoningEngineExecutionServiceAsyncClient.stream_query_reasoning_engine",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1.ReasoningEngineExecutionService",
+                        "rpcName": "StreamQueryReasoningEngine",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
 
     @property
diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_base.py
index b543d26f2a..135bdf8a2b 100644
--- a/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1/services/reasoning_engine_execution_service/transports/rest_base.py
@@ -28,9 +28,7 @@
 
 from google.api import httpbody_pb2  # type: ignore
-from google.cloud.aiplatform_v1.types import (
-    reasoning_engine_execution_service,
-)
+from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service
 from google.longrunning import operations_pb2  # type: ignore
 
import reasoning_engine_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -660,11 +656,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = ReasoningEngineServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + ReasoningEngineServiceClient._read_environment_variables() + ) self._client_cert_source = ReasoningEngineServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_asyncio.py index 510da1382d..72b5c9872e 100644 --- a/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/reasoning_engine_service/transports/rest_asyncio.py @@ -935,11 +935,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_reasoning_engine(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_reasoning_engine_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_reasoning_engine_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1098,11 +1097,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_reasoning_engine(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_reasoning_engine_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_reasoning_engine_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1589,11 +1587,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_reasoning_engine(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_reasoning_engine_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_reasoning_engine_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/schedule_service/async_client.py b/google/cloud/aiplatform_v1/services/schedule_service/async_client.py index 76326d2404..dd8ea75c90 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/async_client.py @@ -1132,9 +1132,9 @@ async def sample_update_schedule(): Required. The Schedule which replaces the resource on the server. The following restrictions will be applied: - - The scheduled request type cannot be changed. - - The non-empty fields cannot be unset. - - The output_only fields will be ignored if specified. + - The scheduled request type cannot be changed. + - The non-empty fields cannot be unset. 
+ - The output_only fields will be ignored if specified. This corresponds to the ``schedule`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/schedule_service/client.py b/google/cloud/aiplatform_v1/services/schedule_service/client.py index f0d3d0a2f6..454768767f 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/client.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/client.py @@ -879,11 +879,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = ScheduleServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + ScheduleServiceClient._read_environment_variables() + ) self._client_cert_source = ScheduleServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -1786,9 +1784,9 @@ def sample_update_schedule(): Required. The Schedule which replaces the resource on the server. The following restrictions will be applied: - - The scheduled request type cannot be changed. - - The non-empty fields cannot be unset. - - The output_only fields will be ignored if specified. + - The scheduled request type cannot be changed. + - The non-empty fields cannot be unset. + - The output_only fields will be ignored if specified. This corresponds to the ``schedule`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py index 13112bd91b..1f781cc082 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py @@ -47,14 +47,10 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.specialist_pool_service import ( - pagers, -) +from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import ( - specialist_pool as gca_specialist_pool, -) +from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool from google.cloud.aiplatform_v1.types import specialist_pool_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py index 130848079e..5aaa7f644d 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py @@ -63,14 +63,10 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.specialist_pool_service import ( - pagers, -) +from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import specialist_pool 
-from google.cloud.aiplatform_v1.types import ( - specialist_pool as gca_specialist_pool, -) +from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool from google.cloud.aiplatform_v1.types import specialist_pool_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -642,11 +638,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = SpecialistPoolServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + SpecialistPoolServiceClient._read_environment_variables() + ) self._client_cert_source = SpecialistPoolServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py index 31df45ad1c..a2e74b1084 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py @@ -60,9 +60,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1.types import tensorboard_service from google.cloud.aiplatform_v1.types import tensorboard_time_series from google.cloud.aiplatform_v1.types import ( diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py index 4a906246cd..45115dbe82 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py @@ -75,9 +75,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1.types import tensorboard_service from google.cloud.aiplatform_v1.types import tensorboard_time_series from google.cloud.aiplatform_v1.types import ( @@ -726,11 +724,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = TensorboardServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + TensorboardServiceClient._read_environment_variables() + ) self._client_cert_source = TensorboardServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py index 6824a099cf..4b03b67c96 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py @@ -34,9 +34,7 @@ tensorboard_experiment as 
gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1.types import tensorboard_service from google.cloud.aiplatform_v1.types import tensorboard_time_series from google.cloud.aiplatform_v1.types import ( diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py index 72fb9b300c..3be28ac19d 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py @@ -37,9 +37,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1.types import tensorboard_service from google.cloud.aiplatform_v1.types import tensorboard_time_series from google.cloud.aiplatform_v1.types import ( diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py index 8d4282dd6f..336508197c 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py @@ -40,9 +40,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1.types import tensorboard_service from google.cloud.aiplatform_v1.types import tensorboard_time_series from google.cloud.aiplatform_v1.types import ( diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py index 29438f5244..4af211588e 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py @@ -43,9 +43,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1.types import tensorboard_service from google.cloud.aiplatform_v1.types import tensorboard_time_series from google.cloud.aiplatform_v1.types import ( @@ -4292,11 +4290,10 @@ def __call__( resp = self._interceptor.post_batch_create_tensorboard_runs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_batch_create_tensorboard_runs_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_batch_create_tensorboard_runs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4392,11 +4389,10 @@ 
def __call__( _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_batch_create_tensorboard_time_series( - request, metadata + request, metadata = ( + self._interceptor.pre_batch_create_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -4464,11 +4460,10 @@ def __call__( resp = self._interceptor.post_batch_create_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_batch_create_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_batch_create_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4561,11 +4556,10 @@ def __call__( _BaseTensorboardServiceRestTransport._BaseBatchReadTensorboardTimeSeriesData._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_batch_read_tensorboard_time_series_data( - request, metadata + request, metadata = ( + self._interceptor.pre_batch_read_tensorboard_time_series_data( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseBatchReadTensorboardTimeSeriesData._get_transcoded_request( http_options, request @@ -4628,11 +4622,10 @@ def __call__( resp = self._interceptor.post_batch_read_tensorboard_time_series_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_batch_read_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_batch_read_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4949,11 +4942,10 @@ def __call__( resp = self._interceptor.post_create_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_create_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_create_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5277,11 +5269,10 @@ def __call__( resp = self._interceptor.post_create_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_create_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_create_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5585,11 +5576,10 @@ def __call__( resp = self._interceptor.post_delete_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_delete_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_delete_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5891,11 +5881,10 @@ def __call__( resp = self._interceptor.post_delete_tensorboard_time_series(resp) response_metadata = 
[(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_delete_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_delete_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5987,11 +5976,10 @@ def __call__( _BaseTensorboardServiceRestTransport._BaseExportTensorboardTimeSeriesData._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_export_tensorboard_time_series_data( - request, metadata + request, metadata = ( + self._interceptor.pre_export_tensorboard_time_series_data( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseExportTensorboardTimeSeriesData._get_transcoded_request( http_options, request @@ -6059,11 +6047,10 @@ def __call__( resp = self._interceptor.post_export_tensorboard_time_series_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_export_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_export_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -7463,6 +7450,22 @@ def __call__( resp, _ = self._interceptor.post_read_tensorboard_blob_data_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.TensorboardServiceClient.read_tensorboard_blob_data", + extra={ + "serviceName": "google.cloud.aiplatform.v1.TensorboardService", + "rpcName": "ReadTensorboardBlobData", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ReadTensorboardSize( @@ -7748,11 +7751,10 @@ def __call__( resp = self._interceptor.post_read_tensorboard_time_series_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_read_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_read_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -8226,11 +8228,10 @@ def __call__( resp = self._interceptor.post_update_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_update_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_update_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -8554,11 +8555,10 @@ def __call__( resp = self._interceptor.post_update_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_update_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_update_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -8723,11 +8723,10 @@ def __call__( resp = 
self._interceptor.post_write_tensorboard_experiment_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_write_tensorboard_experiment_data_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_write_tensorboard_experiment_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_asyncio.py index 8c281b3f85..708ec9bbbe 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_asyncio.py @@ -56,9 +56,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1.types import tensorboard_service from google.cloud.aiplatform_v1.types import tensorboard_time_series from google.cloud.aiplatform_v1.types import ( @@ -2482,11 +2480,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardRuns._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_batch_create_tensorboard_runs( - request, metadata + request, metadata = ( + await self._interceptor.pre_batch_create_tensorboard_runs( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardRuns._get_transcoded_request( http_options, request @@ -2557,11 +2554,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_create_tensorboard_runs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_create_tensorboard_runs_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_create_tensorboard_runs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -2658,11 +2654,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_batch_create_tensorboard_time_series( - request, metadata + request, metadata = ( + await self._interceptor.pre_batch_create_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -2737,11 +2732,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_create_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_create_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -2835,11 +2829,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseBatchReadTensorboardTimeSeriesData._get_http_options() ) - ( - request, - metadata, - ) = await 
self._interceptor.pre_batch_read_tensorboard_time_series_data( - request, metadata + request, metadata = ( + await self._interceptor.pre_batch_read_tensorboard_time_series_data( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseBatchReadTensorboardTimeSeriesData._get_transcoded_request( http_options, request @@ -2909,11 +2902,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_read_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_read_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3174,11 +3166,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseCreateTensorboardExperiment._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_tensorboard_experiment( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_tensorboard_experiment( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseCreateTensorboardExperiment._get_transcoded_request( http_options, request @@ -3249,11 +3240,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3517,11 +3507,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseCreateTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_tensorboard_time_series( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseCreateTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -3592,11 +3581,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3851,11 +3839,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseDeleteTensorboardExperiment._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_delete_tensorboard_experiment( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_tensorboard_experiment( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseDeleteTensorboardExperiment._get_transcoded_request( http_options, request @@ -3921,11 +3908,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_delete_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4176,11 +4162,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseDeleteTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_delete_tensorboard_time_series( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseDeleteTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -4246,11 +4231,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4343,11 +4327,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseExportTensorboardTimeSeriesData._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_export_tensorboard_time_series_data( - request, metadata + request, metadata = ( + await self._interceptor.pre_export_tensorboard_time_series_data( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseExportTensorboardTimeSeriesData._get_transcoded_request( http_options, request @@ -4422,11 +4405,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_export_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_export_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4748,11 +4730,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_get_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5070,11 +5051,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_get_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( logging.DEBUG @@ -5168,11 +5148,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseListTensorboardExperiments._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_tensorboard_experiments( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_tensorboard_experiments( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseListTensorboardExperiments._get_transcoded_request( http_options, request @@ -5238,11 +5217,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_tensorboard_experiments(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_tensorboard_experiments_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_tensorboard_experiments_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5658,11 +5636,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseListTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_tensorboard_time_series( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseListTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -5728,11 +5705,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5892,12 +5868,28 @@ async def __call__( ) resp = await self._interceptor.post_read_tensorboard_blob_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_read_tensorboard_blob_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_read_tensorboard_blob_data_with_metadata( + resp, response_metadata + ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.read_tensorboard_blob_data", + extra={ + "serviceName": "google.cloud.aiplatform.v1.TensorboardService", + "rpcName": "ReadTensorboardBlobData", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp class _ReadTensorboardSize( @@ -6127,11 +6119,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseReadTensorboardTimeSeriesData._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_read_tensorboard_time_series_data( - request, metadata + request, metadata = ( + await self._interceptor.pre_read_tensorboard_time_series_data( + request, metadata + ) ) transcoded_request = 
_BaseTensorboardServiceRestTransport._BaseReadTensorboardTimeSeriesData._get_transcoded_request( http_options, request @@ -6197,11 +6188,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_read_tensorboard_time_series_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_read_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_read_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6623,11 +6613,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseUpdateTensorboardExperiment._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_tensorboard_experiment( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_tensorboard_experiment( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseUpdateTensorboardExperiment._get_transcoded_request( http_options, request @@ -6698,11 +6687,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6966,11 +6954,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseUpdateTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_tensorboard_time_series( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseUpdateTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -7041,11 +7028,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -7142,11 +7128,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseWriteTensorboardExperimentData._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_write_tensorboard_experiment_data( - request, metadata + request, metadata = ( + await self._interceptor.pre_write_tensorboard_experiment_data( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseWriteTensorboardExperimentData._get_transcoded_request( http_options, request @@ -7219,11 +7204,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_write_tensorboard_experiment_data(resp) response_metadata = [(k, str(v)) for 
k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_write_tensorboard_experiment_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_write_tensorboard_experiment_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -7388,11 +7372,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_write_tensorboard_run_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_write_tensorboard_run_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_write_tensorboard_run_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_base.py index 7d44637071..842c2edfcb 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest_base.py @@ -33,9 +33,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1.types import tensorboard_service from google.cloud.aiplatform_v1.types import tensorboard_time_series from google.cloud.aiplatform_v1.types import ( diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/async_client.py b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/async_client.py index c9a47a3d27..555449aa61 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/async_client.py @@ -47,9 +47,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.vertex_rag_data_service import ( - pagers, -) +from google.cloud.aiplatform_v1.services.vertex_rag_data_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import operation as gca_operation diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/client.py b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/client.py index 9dedde29e3..f7aaaff819 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/client.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/client.py @@ -63,9 +63,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.vertex_rag_data_service import ( - pagers, -) +from google.cloud.aiplatform_v1.services.vertex_rag_data_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -744,11 +742,9 @@ def __init__( universe_domain_opt = 
getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = VertexRagDataServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + VertexRagDataServiceClient._read_environment_variables() + ) self._client_cert_source = VertexRagDataServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_asyncio.py index 9eb78116dc..e3a9b5a35b 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_data_service/transports/rest_asyncio.py @@ -2969,11 +2969,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_rag_engine_config(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_rag_engine_config_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_rag_engine_config_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/vertex_rag_service/client.py b/google/cloud/aiplatform_v1/services/vertex_rag_service/client.py index a1f96c96db..715164c5fa 100644 --- a/google/cloud/aiplatform_v1/services/vertex_rag_service/client.py +++ b/google/cloud/aiplatform_v1/services/vertex_rag_service/client.py @@ -623,11 +623,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = VertexRagServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + VertexRagServiceClient._read_environment_variables() + ) self._client_cert_source = VertexRagServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1/services/vizier_service/client.py b/google/cloud/aiplatform_v1/services/vizier_service/client.py index bc6cdc19db..1567ca7787 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/client.py @@ -673,11 +673,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = VizierServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + VizierServiceClient._read_environment_variables() + ) self._client_cert_source = VizierServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py index 5b426a81b5..1789f6a776 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py @@ -3412,11 +3412,10 @@ def __call__( resp = 
self._interceptor.post_check_trial_early_stopping_state(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_check_trial_early_stopping_state_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_check_trial_early_stopping_state_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_asyncio.py index 21cc2f80b8..024f75a244 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest_asyncio.py @@ -1531,11 +1531,10 @@ async def __call__( _BaseVizierServiceRestTransport._BaseCheckTrialEarlyStoppingState._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_check_trial_early_stopping_state( - request, metadata + request, metadata = ( + await self._interceptor.pre_check_trial_early_stopping_state( + request, metadata + ) ) transcoded_request = _BaseVizierServiceRestTransport._BaseCheckTrialEarlyStoppingState._get_transcoded_request( http_options, request @@ -1606,11 +1605,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_check_trial_early_stopping_state(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_check_trial_early_stopping_state_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_check_trial_early_stopping_state_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1/types/accelerator_type.py b/google/cloud/aiplatform_v1/types/accelerator_type.py index 54db499797..44c8fb5cb4 100644 --- a/google/cloud/aiplatform_v1/types/accelerator_type.py +++ b/google/cloud/aiplatform_v1/types/accelerator_type.py @@ -29,7 +29,7 @@ class AcceleratorType(proto.Enum): - r"""LINT: LEGACY_NAMES Represents a hardware accelerator type. + r"""Represents a hardware accelerator type. Values: ACCELERATOR_TYPE_UNSPECIFIED (0): @@ -63,6 +63,8 @@ class AcceleratorType(proto.Enum): Nvidia B200 GPU. NVIDIA_GB200 (17): Nvidia GB200 GPU. + NVIDIA_RTX_PRO_6000 (18): + Nvidia RTX Pro 6000 GPU. TPU_V2 (6): TPU v2. TPU_V3 (7): @@ -87,6 +89,7 @@ class AcceleratorType(proto.Enum): NVIDIA_H200_141GB = 15 NVIDIA_B200 = 16 NVIDIA_GB200 = 17 + NVIDIA_RTX_PRO_6000 = 18 TPU_V2 = 6 TPU_V3 = 7 TPU_V4_POD = 10 diff --git a/google/cloud/aiplatform_v1/types/annotation.py b/google/cloud/aiplatform_v1/types/annotation.py index 4bc2e7d69f..61ed851b98 100644 --- a/google/cloud/aiplatform_v1/types/annotation.py +++ b/google/cloud/aiplatform_v1/types/annotation.py @@ -80,15 +80,14 @@ class Annotation(proto.Message): "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Annotation: - - "aiplatform.googleapis.com/annotation_set_name": - optional, name of the UI's annotation set this Annotation - belongs to. If not set, the Annotation is not visible in - the UI. + - "aiplatform.googleapis.com/annotation_set_name": optional, + name of the UI's annotation set this Annotation belongs + to. If not set, the Annotation is not visible in the UI. 
- - "aiplatform.googleapis.com/payload_schema": output only, - its value is the - [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri] - title. + - "aiplatform.googleapis.com/payload_schema": output only, + its value is the + [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri] + title. """ name: str = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index 4a5e92a48a..e1c263e295 100644 --- a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -19,12 +19,8 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - completion_stats as gca_completion_stats, -) -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import completion_stats as gca_completion_stats +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import explanation from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import job_state @@ -146,17 +142,17 @@ class BatchPredictionJob(proto.Message): [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config] object: - - ``bigquery``: output includes a column named - ``explanation``. The value is a struct that conforms to - the [Explanation][google.cloud.aiplatform.v1.Explanation] - object. - - ``jsonl``: The JSON objects on each line include an - additional entry keyed ``explanation``. The value of the - entry is a JSON object that conforms to the - [Explanation][google.cloud.aiplatform.v1.Explanation] - object. - - ``csv``: Generating explanations for CSV format is not - supported. + - ``bigquery``: output includes a column named + ``explanation``. The value is a struct that conforms to + the [Explanation][google.cloud.aiplatform.v1.Explanation] + object. + - ``jsonl``: The JSON objects on each line include an + additional entry keyed ``explanation``. The value of the + entry is a JSON object that conforms to the + [Explanation][google.cloud.aiplatform.v1.Explanation] + object. + - ``csv``: Generating explanations for CSV format is not + supported. If this field is set to true, either the [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec] @@ -315,49 +311,48 @@ class InstanceConfig(proto.Message): Supported values are: - - ``object``: Each input is converted to JSON object - format. - - - For ``bigquery``, each row is converted to an object. - - For ``jsonl``, each line of the JSONL input must be an - object. - - Does not apply to ``csv``, ``file-list``, - ``tf-record``, or ``tf-record-gzip``. - - - ``array``: Each input is converted to JSON array format. - - - For ``bigquery``, each row is converted to an array. - The order of columns is determined by the BigQuery - column order, unless - [included_fields][google.cloud.aiplatform.v1.BatchPredictionJob.InstanceConfig.included_fields] - is populated. - [included_fields][google.cloud.aiplatform.v1.BatchPredictionJob.InstanceConfig.included_fields] - must be populated for specifying field orders. - - For ``jsonl``, if each line of the JSONL input is an - object, - [included_fields][google.cloud.aiplatform.v1.BatchPredictionJob.InstanceConfig.included_fields] - must be populated for specifying field orders. 
- - Does not apply to ``csv``, ``file-list``, - ``tf-record``, or ``tf-record-gzip``. + - ``object``: Each input is converted to JSON object format. + + - For ``bigquery``, each row is converted to an object. + - For ``jsonl``, each line of the JSONL input must be an + object. + - Does not apply to ``csv``, ``file-list``, ``tf-record``, + or ``tf-record-gzip``. + + - ``array``: Each input is converted to JSON array format. + + - For ``bigquery``, each row is converted to an array. The + order of columns is determined by the BigQuery column + order, unless + [included_fields][google.cloud.aiplatform.v1.BatchPredictionJob.InstanceConfig.included_fields] + is populated. + [included_fields][google.cloud.aiplatform.v1.BatchPredictionJob.InstanceConfig.included_fields] + must be populated for specifying field orders. + - For ``jsonl``, if each line of the JSONL input is an + object, + [included_fields][google.cloud.aiplatform.v1.BatchPredictionJob.InstanceConfig.included_fields] + must be populated for specifying field orders. + - Does not apply to ``csv``, ``file-list``, ``tf-record``, + or ``tf-record-gzip``. If not specified, Vertex AI converts the batch prediction input as follows: - - For ``bigquery`` and ``csv``, the behavior is the same as - ``array``. The order of columns is the same as defined in - the file or table, unless - [included_fields][google.cloud.aiplatform.v1.BatchPredictionJob.InstanceConfig.included_fields] - is populated. - - For ``jsonl``, the prediction instance format is - determined by each line of the input. - - For ``tf-record``/``tf-record-gzip``, each record will be - converted to an object in the format of - ``{"b64": <value>}``, where ``<value>`` is the - Base64-encoded string of the content of the record. - - For ``file-list``, each file in the list will be - converted to an object in the format of - ``{"b64": <value>}``, where ``<value>`` is the - Base64-encoded string of the content of the file. + - For ``bigquery`` and ``csv``, the behavior is the same as + ``array``. The order of columns is the same as defined in + the file or table, unless + [included_fields][google.cloud.aiplatform.v1.BatchPredictionJob.InstanceConfig.included_fields] + is populated. + - For ``jsonl``, the prediction instance format is + determined by each line of the input. + - For ``tf-record``/``tf-record-gzip``, each record will be + converted to an object in the format of + ``{"b64": <value>}``, where ``<value>`` is the + Base64-encoded string of the content of the record. + - For ``file-list``, each file in the list will be converted + to an object in the format of ``{"b64": <value>}``, where + ``<value>`` is the Base64-encoded string of the content of + the file. key_field (str): The name of the field that is considered as a key. @@ -370,11 +365,11 @@ value of the key field, in a field named ``key`` in the output: - - For ``jsonl`` output format, the output will have a - ``key`` field instead of the ``instance`` field. - - For ``csv``/``bigquery`` output format, the output will - have have a ``key`` column instead of the instance - feature columns. + - For ``jsonl`` output format, the output will have a + ``key`` field instead of the ``instance`` field. + - For ``csv``/``bigquery`` output format, the output will + have a ``key`` column instead of the instance feature + columns. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord.
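The ``instance_type`` and ``key_field`` semantics reflowed in the hunk above are easiest to see in a concrete request. A minimal sketch, assuming only the ``InstanceConfig`` fields documented in this diff; the field values are hypothetical:

    from google.cloud import aiplatform_v1

    # Sketch only, not part of this patch: with instance_type="array", each
    # BigQuery/CSV row becomes a JSON array in column order; key_field makes
    # the output rows carry a "key" entry instead of repeating the instance.
    instance_config = aiplatform_v1.BatchPredictionJob.InstanceConfig(
        instance_type="array",
        key_field="row_id",  # hypothetical key column
    )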
diff --git a/google/cloud/aiplatform_v1/types/cached_content.py b/google/cloud/aiplatform_v1/types/cached_content.py index bbd6fd1294..f3fb112609 100644 --- a/google/cloud/aiplatform_v1/types/cached_content.py +++ b/google/cloud/aiplatform_v1/types/cached_content.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import content -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import tool from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/content.py b/google/cloud/aiplatform_v1/types/content.py index bf80aaa5c5..37ed74bd64 100644 --- a/google/cloud/aiplatform_v1/types/content.py +++ b/google/cloud/aiplatform_v1/types/content.py @@ -394,11 +394,11 @@ class GenerationConfig(proto.Message): Optional. Output response mimetype of the generated candidate text. Supported mimetype: - - ``text/plain``: (default) Text output. - - ``application/json``: JSON response in the candidates. - The model needs to be prompted to output the appropriate - response type, otherwise the behavior is undefined. This - is a preview feature. + - ``text/plain``: (default) Text output. + - ``application/json``: JSON response in the candidates. The + model needs to be prompted to output the appropriate + response type, otherwise the behavior is undefined. This + is a preview feature. response_schema (google.cloud.aiplatform_v1.types.Schema): Optional. The ``Schema`` object allows the definition of input and output data types. These types can be objects, but @@ -422,26 +422,26 @@ class GenerationConfig(proto.Message): supported. Specifically, only the following properties are supported: - - ``$id`` - - ``$defs`` - - ``$ref`` - - ``$anchor`` - - ``type`` - - ``format`` - - ``title`` - - ``description`` - - ``enum`` (for strings and numbers) - - ``items`` - - ``prefixItems`` - - ``minItems`` - - ``maxItems`` - - ``minimum`` - - ``maximum`` - - ``anyOf`` - - ``oneOf`` (interpreted the same as ``anyOf``) - - ``properties`` - - ``additionalProperties`` - - ``required`` + - ``$id`` + - ``$defs`` + - ``$ref`` + - ``$anchor`` + - ``type`` + - ``format`` + - ``title`` + - ``description`` + - ``enum`` (for strings and numbers) + - ``items`` + - ``prefixItems`` + - ``minItems`` + - ``maxItems`` + - ``minimum`` + - ``maximum`` + - ``anyOf`` + - ``oneOf`` (interpreted the same as ``anyOf``) + - ``properties`` + - ``additionalProperties`` + - ``required`` The non-standard ``propertyOrdering`` property may also be set. @@ -1357,8 +1357,55 @@ class Maps(proto.Message): Can be used to look up the Place. This field is a member of `oneof`_ ``_place_id``. + place_answer_sources (google.cloud.aiplatform_v1.types.GroundingChunk.Maps.PlaceAnswerSources): + Sources used to generate the place answer. + This includes review snippets and photos that + were used to generate the answer, as well as + uris to flag content. """ + class PlaceAnswerSources(proto.Message): + r""" + + Attributes: + review_snippets (MutableSequence[google.cloud.aiplatform_v1.types.GroundingChunk.Maps.PlaceAnswerSources.ReviewSnippet]): + Snippets of reviews that are used to generate + the answer. + """ + + class ReviewSnippet(proto.Message): + r"""Encapsulates a review snippet. + + Attributes: + review_id (str): + Id of the review referencing the place. 
+ google_maps_uri (str): + A link to show the review on Google Maps. + title (str): + Title of the review. + """ + + review_id: str = proto.Field( + proto.STRING, + number=1, + ) + google_maps_uri: str = proto.Field( + proto.STRING, + number=2, + ) + title: str = proto.Field( + proto.STRING, + number=3, + ) + + review_snippets: MutableSequence[ + "GroundingChunk.Maps.PlaceAnswerSources.ReviewSnippet" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="GroundingChunk.Maps.PlaceAnswerSources.ReviewSnippet", + ) + uri: str = proto.Field( proto.STRING, number=1, @@ -1379,6 +1426,11 @@ class Maps(proto.Message): number=4, optional=True, ) + place_answer_sources: "GroundingChunk.Maps.PlaceAnswerSources" = proto.Field( + proto.MESSAGE, + number=5, + message="GroundingChunk.Maps.PlaceAnswerSources", + ) web: Web = proto.Field( proto.MESSAGE, @@ -1470,8 +1522,37 @@ class GroundingMetadata(proto.Message): Google Maps grounding. This field is a member of `oneof`_ ``_google_maps_widget_context_token``. + source_flagging_uris (MutableSequence[google.cloud.aiplatform_v1.types.GroundingMetadata.SourceFlaggingUri]): + List of source flagging uris. This is + currently populated only for Google Maps + grounding. """ + class SourceFlaggingUri(proto.Message): + r"""Source content flagging uri for a place or review. This is + currently populated only for Google Maps grounding. + + Attributes: + source_id (str): + Id of the place or review. + flag_content_uri (str): + A link where users can flag a problem with + the source (place or review). (-- The link is + generated by Google and it does not contain + information from the user query. It may contain + information of the content it is flagging, which + can be used to identify places. --) + """ + + source_id: str = proto.Field( + proto.STRING, + number=1, + ) + flag_content_uri: str = proto.Field( + proto.STRING, + number=2, + ) + web_search_queries: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, @@ -1503,6 +1584,11 @@ class GroundingMetadata(proto.Message): number=8, optional=True, ) + source_flagging_uris: MutableSequence[SourceFlaggingUri] = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=SourceFlaggingUri, + ) class SearchEntryPoint(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index caed31b232..cb70c2ded7 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import env_var from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import job_state @@ -253,20 +251,20 @@ class CustomJobSpec(proto.Message): For CustomJob: - - AIP_MODEL_DIR = ``<base_output_directory>/model/`` - - AIP_CHECKPOINT_DIR = - ``<base_output_directory>/checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``<base_output_directory>/logs/`` + - AIP_MODEL_DIR = ``<base_output_directory>/model/`` + - AIP_CHECKPOINT_DIR = + ``<base_output_directory>/checkpoints/`` + - AIP_TENSORBOARD_LOG_DIR = + ``<base_output_directory>/logs/`` For CustomJob backing a Trial of HyperparameterTuningJob: - - AIP_MODEL_DIR = - ``<base_output_directory>/<trial_id>/model/`` - - AIP_CHECKPOINT_DIR = - ``<base_output_directory>/<trial_id>/checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``<base_output_directory>/<trial_id>/logs/`` + - AIP_MODEL_DIR = + ``<base_output_directory>/<trial_id>/model/`` + - AIP_CHECKPOINT_DIR = + ``<base_output_directory>/<trial_id>/checkpoints/`` + - AIP_TENSORBOARD_LOG_DIR = + ``<base_output_directory>/<trial_id>/logs/`` protected_artifact_location_id
(str): The ID of the location to store protected artifacts. e.g. us-central1. Populate only when diff --git a/google/cloud/aiplatform_v1/types/data_labeling_job.py b/google/cloud/aiplatform_v1/types/data_labeling_job.py index beca34ba35..2ea0ec2c65 100644 --- a/google/cloud/aiplatform_v1/types/data_labeling_job.py +++ b/google/cloud/aiplatform_v1/types/data_labeling_job.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import job_state from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore @@ -121,10 +119,10 @@ class DataLabelingJob(proto.Message): "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each DataLabelingJob: - - "aiplatform.googleapis.com/schema": output only, its - value is the - [inputs_schema][google.cloud.aiplatform.v1.DataLabelingJob.inputs_schema_uri]'s - title. + - "aiplatform.googleapis.com/schema": output only, its value + is the + [inputs_schema][google.cloud.aiplatform.v1.DataLabelingJob.inputs_schema_uri]'s + title. specialist_pools (MutableSequence[str]): The SpecialistPools' resource names associated with this job. diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py index 186c9106f4..c810b9997b 100644 --- a/google/cloud/aiplatform_v1/types/dataset.py +++ b/google/cloud/aiplatform_v1/types/dataset.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import saved_query from google.protobuf import struct_pb2 # type: ignore @@ -91,10 +89,10 @@ class Dataset(proto.Message): "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Dataset: - - "aiplatform.googleapis.com/dataset_metadata_schema": - output only, its value is the - [metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] - title. + - "aiplatform.googleapis.com/dataset_metadata_schema": + output only, its value is the + [metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] + title. saved_queries (MutableSequence[google.cloud.aiplatform_v1.types.SavedQuery]): All SavedQueries belong to the Dataset will be returned in List/Get Dataset response. 
The annotation_specs field will diff --git a/google/cloud/aiplatform_v1/types/dataset_service.py b/google/cloud/aiplatform_v1/types/dataset_service.py index 05086b8737..aa594f5304 100644 --- a/google/cloud/aiplatform_v1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1/types/dataset_service.py @@ -22,9 +22,7 @@ from google.cloud.aiplatform_v1.types import annotation from google.cloud.aiplatform_v1.types import data_item as gca_data_item from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation from google.cloud.aiplatform_v1.types import saved_query as gca_saved_query @@ -148,9 +146,9 @@ class UpdateDatasetRequest(proto.Message): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` - - ``description`` - - ``labels`` + - ``display_name`` + - ``description`` + - ``labels`` """ dataset: gca_dataset.Dataset = proto.Field( @@ -179,7 +177,7 @@ class UpdateDatasetVersionRequest(proto.Message): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` + - ``display_name`` """ dataset_version: gca_dataset_version.DatasetVersion = proto.Field( @@ -206,19 +204,19 @@ class ListDatasetsRequest(proto.Message): An expression for filtering the results of the request. For field names both snake_case and camelCase are supported. - - ``display_name``: supports = and != - - ``metadata_schema_uri``: supports = and != - - ``labels`` supports general map functions that is: + - ``display_name``: supports = and != + - ``metadata_schema_uri``: supports = and != + - ``labels`` supports general map functions that is: - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. Some examples: - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` page_size (int): The standard list page size. page_token (str): @@ -230,9 +228,9 @@ class ListDatasetsRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` """ parent: str = proto.Field( @@ -386,7 +384,7 @@ class ExportDataResponse(proto.Message): All of the files that are exported in this export operation. For custom code training export, only three (training, validation and test) Cloud Storage paths in wildcard format - are populated (for example, gs://.../training-*). + are populated (for example, gs://.../training-\*). data_stats (google.cloud.aiplatform_v1.types.Model.DataStats): Only present for custom code training export use case. Records data stats, i.e., @@ -744,30 +742,30 @@ class SearchDataItemsRequest(proto.Message): An expression for filtering the DataItem that will be returned. - - ``data_item_id`` - for = or !=. - - ``labeled`` - for = or !=. 
- - ``has_annotation(ANNOTATION_SPEC_ID)`` - true only for - DataItem that have at least one annotation with - annotation_spec_id = ``ANNOTATION_SPEC_ID`` in the - context of SavedQuery or DataLabelingJob. + - ``data_item_id`` - for = or !=. + - ``labeled`` - for = or !=. + - ``has_annotation(ANNOTATION_SPEC_ID)`` - true only for + DataItem that have at least one annotation with + annotation_spec_id = ``ANNOTATION_SPEC_ID`` in the context + of SavedQuery or DataLabelingJob. For example: - - ``data_item=1`` - - ``has_annotation(5)`` + - ``data_item=1`` + - ``has_annotation(5)`` annotations_filter (str): An expression for filtering the Annotations that will be returned per DataItem. - - ``annotation_spec_id`` - for = or !=. + - ``annotation_spec_id`` - for = or !=. annotation_filters (MutableSequence[str]): An expression that specifies what Annotations will be returned per DataItem. Annotations satisfied either of the conditions will be returned. - - ``annotation_spec_id`` - for = or !=. Must specify - ``saved_query_id=`` - saved query id that annotations - should belong to. + - ``annotation_spec_id`` - for = or !=. Must specify + ``saved_query_id=`` - saved query id that annotations + should belong to. field_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields of [DataItemView][google.cloud.aiplatform.v1.DataItemView] to diff --git a/google/cloud/aiplatform_v1/types/deployment_resource_pool.py b/google/cloud/aiplatform_v1/types/deployment_resource_pool.py index df3352c995..991c9a6a6d 100644 --- a/google/cloud/aiplatform_v1/types/deployment_resource_pool.py +++ b/google/cloud/aiplatform_v1/types/deployment_resource_pool.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import machine_resources from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py index 9a0679d18b..c149a18242 100644 --- a/google/cloud/aiplatform_v1/types/endpoint.py +++ b/google/cloud/aiplatform_v1/types/endpoint.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import explanation from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import machine_resources @@ -174,6 +172,9 @@ class Endpoint(proto.Message): native RAG integration can be configured. Currently, only Model Garden models are supported. + private_model_server_enabled (bool): + If true, the model server will be isolated + from the external internet. 
""" name: str = proto.Field( @@ -274,6 +275,10 @@ class Endpoint(proto.Message): number=29, message="GenAiAdvancedFeaturesConfig", ) + private_model_server_enabled: bool = proto.Field( + proto.BOOL, + number=30, + ) class DeployedModel(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/endpoint_service.py b/google/cloud/aiplatform_v1/types/endpoint_service.py index 5be21fa24b..b4b9bd99c0 100644 --- a/google/cloud/aiplatform_v1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/endpoint_service.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - deployment_stage as gca_deployment_stage, -) +from google.cloud.aiplatform_v1.types import deployment_stage as gca_deployment_stage from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint from google.cloud.aiplatform_v1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -152,26 +150,26 @@ class ListEndpointsRequest(proto.Message): request. For field names both snake_case and camelCase are supported. - - ``endpoint`` supports ``=`` and ``!=``. ``endpoint`` - represents the Endpoint ID, i.e. the last segment of the - Endpoint's [resource - name][google.cloud.aiplatform.v1.Endpoint.name]. - - ``display_name`` supports ``=`` and ``!=``. - - ``labels`` supports general map functions that is: + - ``endpoint`` supports ``=`` and ``!=``. ``endpoint`` + represents the Endpoint ID, i.e. the last segment of the + Endpoint's [resource + name][google.cloud.aiplatform.v1.Endpoint.name]. + - ``display_name`` supports ``=`` and ``!=``. + - ``labels`` supports general map functions that is: - - ``labels.key=value`` - key:value equality - - ``labels.key:*`` or ``labels:key`` - key existence - - A key including a space must be quoted. - ``labels."a key"``. + - ``labels.key=value`` - key:value equality + - ``labels.key:*`` or ``labels:key`` - key existence + - A key including a space must be quoted. + ``labels."a key"``. - - ``base_model_name`` only supports ``=``. + - ``base_model_name`` only supports ``=``. Some examples: - - ``endpoint=1`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - - ``baseModelName="text-bison"`` + - ``endpoint=1`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + - ``baseModelName="text-bison"`` page_size (int): Optional. The standard list page size. page_token (str): @@ -189,9 +187,9 @@ class ListEndpointsRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` Example: ``display_name, create_time desc``. """ @@ -493,19 +491,19 @@ class MutateDeployedModelRequest(proto.Message): Required. The DeployedModel to be mutated within the Endpoint. 
Only the following fields can be mutated: - - ``min_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] - - ``max_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] - - ``required_replica_count`` in - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - - [autoscaling_metric_specs][google.cloud.aiplatform.v1.DedicatedResources.autoscaling_metric_specs] - - ``disable_container_logging`` (v1 only) - - ``enable_container_logging`` (v1beta1 only) + - ``min_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] + - ``max_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] + - ``required_replica_count`` in + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + - [autoscaling_metric_specs][google.cloud.aiplatform.v1.DedicatedResources.autoscaling_metric_specs] + - ``disable_container_logging`` (v1 only) + - ``enable_container_logging`` (v1beta1 only) update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. See [google.protobuf.FieldMask][google.protobuf.FieldMask]. diff --git a/google/cloud/aiplatform_v1/types/entity_type.py b/google/cloud/aiplatform_v1/types/entity_type.py index 9a3da24a4b..b289d9fe08 100644 --- a/google/cloud/aiplatform_v1/types/entity_type.py +++ b/google/cloud/aiplatform_v1/types/entity_type.py @@ -44,7 +44,7 @@ class EntityType(proto.Message): The last part entity_type is assigned by the client. The entity_type can be up to 64 characters long and can consist - only of ASCII Latin letters A-Z and a-z and underscore(_), + only of ASCII Latin letters A-Z and a-z and underscore(\_), and ASCII digits 0-9 starting with a letter. The value will be unique given a featurestore. description (str): diff --git a/google/cloud/aiplatform_v1/types/evaluated_annotation.py b/google/cloud/aiplatform_v1/types/evaluated_annotation.py index c5f9f66f35..2bbced800a 100644 --- a/google/cloud/aiplatform_v1/types/evaluated_annotation.py +++ b/google/cloud/aiplatform_v1/types/evaluated_annotation.py @@ -181,8 +181,8 @@ class EvaluatedAnnotationExplanation(proto.Message): For AutoML Image Classification models, possible values are: - - ``image-integrated-gradients`` - - ``image-xrai`` + - ``image-integrated-gradients`` + - ``image-xrai`` explanation (google.cloud.aiplatform_v1.types.Explanation): Explanation attribution response details. """ diff --git a/google/cloud/aiplatform_v1/types/explanation.py b/google/cloud/aiplatform_v1/types/explanation.py index 712c9017cb..024e9dc839 100644 --- a/google/cloud/aiplatform_v1/types/explanation.py +++ b/google/cloud/aiplatform_v1/types/explanation.py @@ -194,20 +194,19 @@ class Attribution(proto.Message): The format of the value is determined by the feature's input format: - - If the feature is a scalar value, the attribution value - is a [floating - number][google.protobuf.Value.number_value]. + - If the feature is a scalar value, the attribution value is + a [floating number][google.protobuf.Value.number_value]. 
- - If the feature is an array of scalar values, the - attribution value is an - [array][google.protobuf.Value.list_value]. + - If the feature is an array of scalar values, the + attribution value is an + [array][google.protobuf.Value.list_value]. - - If the feature is a struct, the attribution value is a - [struct][google.protobuf.Value.struct_value]. The keys in - the attribution value struct are the same as the keys in - the feature struct. The formats of the values in the - attribution struct are determined by the formats of the - values in the feature struct. + - If the feature is a struct, the attribution value is a + [struct][google.protobuf.Value.struct_value]. The keys in + the attribution value struct are the same as the keys in + the feature struct. The formats of the values in the + attribution struct are determined by the formats of the + values in the feature struct. The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] @@ -243,21 +242,21 @@ class Attribution(proto.Message): caused by approximation used in the explanation method. Lower value means more precise attributions. - - For Sampled Shapley - [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution], - increasing - [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count] - might reduce the error. - - For Integrated Gradients - [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution], - increasing - [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count] - might reduce the error. - - For [XRAI - attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution], - increasing - [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] - might reduce the error. + - For Sampled Shapley + [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution], + increasing + [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count] + might reduce the error. + - For Integrated Gradients + [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution], + increasing + [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count] + might reduce the error. + - For [XRAI + attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution], + increasing + [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] + might reduce the error. See `this introduction `__ diff --git a/google/cloud/aiplatform_v1/types/feature.py b/google/cloud/aiplatform_v1/types/feature.py index 4b92f36f79..a124df4924 100644 --- a/google/cloud/aiplatform_v1/types/feature.py +++ b/google/cloud/aiplatform_v1/types/feature.py @@ -43,7 +43,7 @@ class Feature(proto.Message): The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of - ASCII Latin letters A-Z and a-z, underscore(_), and ASCII + ASCII Latin letters A-Z and a-z, underscore(\_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. 
description (str): diff --git a/google/cloud/aiplatform_v1/types/feature_online_store.py b/google/cloud/aiplatform_v1/types/feature_online_store.py index c921d6c98a..b13a004329 100644 --- a/google/cloud/aiplatform_v1/types/feature_online_store.py +++ b/google/cloud/aiplatform_v1/types/feature_online_store.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import service_networking from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py b/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py index 18f6548a20..4199faa2a5 100644 --- a/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py +++ b/google/cloud/aiplatform_v1/types/feature_online_store_admin_service.py @@ -23,9 +23,7 @@ feature_online_store as gca_feature_online_store, ) from google.cloud.aiplatform_v1.types import feature_view as gca_feature_view -from google.cloud.aiplatform_v1.types import ( - feature_view_sync as gca_feature_view_sync, -) +from google.cloud.aiplatform_v1.types import feature_view_sync as gca_feature_view_sync from google.cloud.aiplatform_v1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -124,20 +122,20 @@ class ListFeatureOnlineStoresRequest(proto.Message): Lists the FeatureOnlineStores that match the filter expression. The following fields are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality and key presence. Examples: - - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` - FeatureOnlineStores created or updated after 2020-01-01. - - ``labels.env = "prod"`` FeatureOnlineStores with label - "env" set to "prod". + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + FeatureOnlineStores created or updated after 2020-01-01. + - ``labels.env = "prod"`` FeatureOnlineStores with label + "env" set to "prod". page_size (int): The maximum number of FeatureOnlineStores to return. The service may return fewer than this @@ -158,8 +156,8 @@ class ListFeatureOnlineStoresRequest(proto.Message): ascending order. Use "desc" after a field name for descending. 
Supported Fields: - - ``create_time`` - - ``update_time`` + - ``create_time`` + - ``update_time`` """ parent: str = proto.Field( @@ -236,11 +234,11 @@ class UpdateFeatureOnlineStoreRequest(proto.Message): Updatable fields: - - ``labels`` - - ``description`` - - ``bigtable`` - - ``bigtable.auto_scaling`` - - ``bigtable.enable_multi_region_replica`` + - ``labels`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` """ feature_online_store: gca_feature_online_store.FeatureOnlineStore = proto.Field( @@ -356,25 +354,25 @@ class ListFeatureViewsRequest(proto.Message): Lists the FeatureViews that match the filter expression. The following filters are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality as well as key + presence. Examples: - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> FeatureViews created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - FeatureViews having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any FeatureView which has a label - with 'env' as the key. + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> FeatureViews created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + FeatureViews having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any FeatureView which has a label + with 'env' as the key. page_size (int): The maximum number of FeatureViews to return. The service may return fewer than this value. If @@ -396,9 +394,9 @@ class ListFeatureViewsRequest(proto.Message): Supported fields: - - ``feature_view_id`` - - ``create_time`` - - ``update_time`` + - ``feature_view_id`` + - ``create_time`` + - ``update_time`` """ parent: str = proto.Field( @@ -473,16 +471,16 @@ class UpdateFeatureViewRequest(proto.Message): Updatable fields: - - ``labels`` - - ``service_agent_type`` - - ``big_query_source`` - - ``big_query_source.uri`` - - ``big_query_source.entity_id_columns`` - - ``feature_registry_source`` - - ``feature_registry_source.feature_groups`` - - ``sync_config`` - - ``sync_config.cron`` - - ``optimized_config.automatic_resources`` + - ``labels`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` + - ``optimized_config.automatic_resources`` """ feature_view: gca_feature_view.FeatureView = proto.Field( @@ -634,15 +632,15 @@ class ListFeatureViewSyncsRequest(proto.Message): Lists the FeatureViewSyncs that match the filter expression. The following filters are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. 
Values must be in RFC - 3339 format. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC 3339 + format. Examples: - - ``create_time > \"2020-01-31T15:30:00.000000Z\"`` --> - FeatureViewSyncs created after - 2020-01-31T15:30:00.000000Z. + - ``create_time > \"2020-01-31T15:30:00.000000Z\"`` --> + FeatureViewSyncs created after + 2020-01-31T15:30:00.000000Z. page_size (int): The maximum number of FeatureViewSyncs to return. The service may return fewer than this @@ -665,7 +663,7 @@ class ListFeatureViewSyncsRequest(proto.Message): Supported fields: - - ``create_time`` + - ``create_time`` """ parent: str = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/feature_registry_service.py b/google/cloud/aiplatform_v1/types/feature_registry_service.py index bc1e9f18db..0665b4d086 100644 --- a/google/cloud/aiplatform_v1/types/feature_registry_service.py +++ b/google/cloud/aiplatform_v1/types/feature_registry_service.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - feature_group as gca_feature_group, -) +from google.cloud.aiplatform_v1.types import feature_group as gca_feature_group from google.cloud.aiplatform_v1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -109,20 +107,20 @@ class ListFeatureGroupsRequest(proto.Message): Lists the FeatureGroups that match the filter expression. The following fields are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality and key presence. Examples: - - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` - FeatureGroups created or updated after 2020-01-01. - - ``labels.env = "prod"`` FeatureGroups with label "env" - set to "prod". + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + FeatureGroups created or updated after 2020-01-01. + - ``labels.env = "prod"`` FeatureGroups with label "env" set + to "prod". page_size (int): The maximum number of FeatureGroups to return. The service may return fewer than this @@ -142,8 +140,8 @@ class ListFeatureGroupsRequest(proto.Message): ascending order. Use "desc" after a field name for descending. 
Supported Fields: - - ``create_time`` - - ``update_time`` + - ``create_time`` + - ``update_time`` """ parent: str = proto.Field( @@ -220,10 +218,10 @@ class UpdateFeatureGroupRequest(proto.Message): Updatable fields: - - ``labels`` - - ``description`` - - ``big_query`` - - ``big_query.entity_id_columns`` + - ``labels`` + - ``description`` + - ``big_query`` + - ``big_query.entity_id_columns`` """ feature_group: gca_feature_group.FeatureGroup = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/feature_selector.py b/google/cloud/aiplatform_v1/types/feature_selector.py index eab5e51d8f..5150e1148d 100644 --- a/google/cloud/aiplatform_v1/types/feature_selector.py +++ b/google/cloud/aiplatform_v1/types/feature_selector.py @@ -36,10 +36,10 @@ class IdMatcher(proto.Message): ids (MutableSequence[str]): Required. The following are accepted as ``ids``: - - A single-element list containing only ``*``, which - selects all Features in the target EntityType, or - - A list containing only Feature IDs, which selects only - Features with those IDs in the target EntityType. + - A single-element list containing only ``*``, which selects + all Features in the target EntityType, or + - A list containing only Feature IDs, which selects only + Features with those IDs in the target EntityType. """ ids: MutableSequence[str] = proto.RepeatedField( diff --git a/google/cloud/aiplatform_v1/types/feature_view.py b/google/cloud/aiplatform_v1/types/feature_view.py index 8a59837bdd..4a1e7dcf9c 100644 --- a/google/cloud/aiplatform_v1/types/feature_view.py +++ b/google/cloud/aiplatform_v1/types/feature_view.py @@ -398,13 +398,13 @@ class VertexRagSource(proto.Message): materialized on each manual sync trigger. The table/view is expected to have the following columns and types at least: - - ``corpus_id`` (STRING, NULLABLE/REQUIRED) - - ``file_id`` (STRING, NULLABLE/REQUIRED) - - ``chunk_id`` (STRING, NULLABLE/REQUIRED) - - ``chunk_data_type`` (STRING, NULLABLE/REQUIRED) - - ``chunk_data`` (STRING, NULLABLE/REQUIRED) - - ``embeddings`` (FLOAT, REPEATED) - - ``file_original_uri`` (STRING, NULLABLE/REQUIRED) + - ``corpus_id`` (STRING, NULLABLE/REQUIRED) + - ``file_id`` (STRING, NULLABLE/REQUIRED) + - ``chunk_id`` (STRING, NULLABLE/REQUIRED) + - ``chunk_data_type`` (STRING, NULLABLE/REQUIRED) + - ``chunk_data`` (STRING, NULLABLE/REQUIRED) + - ``embeddings`` (FLOAT, REPEATED) + - ``file_original_uri`` (STRING, NULLABLE/REQUIRED) rag_corpus_id (int): Optional. The RAG corpus id corresponding to this FeatureView. 
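A minimal sketch (not part of the patch; resource names and label values are placeholders) of the "Updatable fields" pattern documented by the update requests above: only the paths listed in the FieldMask are written, and every path must appear in the message's updatable-fields list.

from google.cloud import aiplatform_v1
from google.protobuf import field_mask_pb2

feature_view = aiplatform_v1.FeatureView(
    name=(
        "projects/my-project/locations/us-central1/"
        "featureOnlineStores/my-store/featureViews/my-view"
    ),
    labels={"env": "prod"},
    sync_config=aiplatform_v1.FeatureView.SyncConfig(cron="0 * * * *"),
)

request = aiplatform_v1.UpdateFeatureViewRequest(
    feature_view=feature_view,
    # Both paths appear in the updatable-fields list for FeatureView.
    update_mask=field_mask_pb2.FieldMask(paths=["labels", "sync_config.cron"]),
)
# aiplatform_v1.FeatureOnlineStoreAdminServiceClient().update_feature_view(
#     request=request
# )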
diff --git a/google/cloud/aiplatform_v1/types/featurestore.py b/google/cloud/aiplatform_v1/types/featurestore.py index 6be53cbe60..11e1c37276 100644 --- a/google/cloud/aiplatform_v1/types/featurestore.py +++ b/google/cloud/aiplatform_v1/types/featurestore.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/featurestore_online_service.py b/google/cloud/aiplatform_v1/types/featurestore_online_service.py index c9afbc0d6f..705be0c60d 100644 --- a/google/cloud/aiplatform_v1/types/featurestore_online_service.py +++ b/google/cloud/aiplatform_v1/types/featurestore_online_service.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - feature_selector as gca_feature_selector, -) +from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector from google.cloud.aiplatform_v1.types import types from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/featurestore_service.py b/google/cloud/aiplatform_v1/types/featurestore_service.py index 074b826d1d..cfe75e7a5b 100644 --- a/google/cloud/aiplatform_v1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1/types/featurestore_service.py @@ -21,9 +21,7 @@ from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type from google.cloud.aiplatform_v1.types import feature as gca_feature -from google.cloud.aiplatform_v1.types import ( - feature_selector as gca_feature_selector, -) +from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import operation @@ -147,23 +145,23 @@ class ListFeaturestoresRequest(proto.Message): Lists the featurestores that match the filter expression. The following fields are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``online_serving_config.fixed_node_count``: Supports - ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` - comparisons. - - ``labels``: Supports key-value equality and key presence. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``online_serving_config.fixed_node_count``: Supports + ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` + comparisons. + - ``labels``: Supports key-value equality and key presence. Examples: - - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` - Featurestores created or updated after 2020-01-01. - - ``labels.env = "prod"`` Featurestores with label "env" - set to "prod". + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + Featurestores created or updated after 2020-01-01. + - ``labels.env = "prod"`` Featurestores with label "env" set + to "prod". page_size (int): The maximum number of Featurestores to return. 
The service may return fewer than this @@ -183,9 +181,9 @@ class ListFeaturestoresRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported Fields: - - ``create_time`` - - ``update_time`` - - ``online_serving_config.fixed_node_count`` + - ``create_time`` + - ``update_time`` + - ``online_serving_config.fixed_node_count`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. """ @@ -267,10 +265,10 @@ class UpdateFeaturestoreRequest(proto.Message): Updatable fields: - - ``labels`` - - ``online_serving_config.fixed_node_count`` - - ``online_serving_config.scaling`` - - ``online_storage_ttl_days`` + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.scaling`` + - ``online_storage_ttl_days`` """ featurestore: gca_featurestore.Featurestore = proto.Field( @@ -469,10 +467,10 @@ class ImportFeatureValuesResponse(proto.Message): The number of rows in input source that weren't imported due to either - - Not having any featureValues. - - Having a null entityId. - - Having a null timestamp. - - Not being parsable (applicable for CSV sources). + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). timestamp_outside_retention_rows_count (int): The number rows that weren't ingested due to having feature timestamps outside the retention @@ -943,25 +941,25 @@ class ListEntityTypesRequest(proto.Message): Lists the EntityTypes that match the filter expression. The following filters are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality as well as key + presence. Examples: - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> EntityTypes created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - EntityTypes having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any EntityType which has a label - with 'env' as the key. + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + EntityTypes having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any EntityType which has a label + with 'env' as the key. page_size (int): The maximum number of EntityTypes to return. The service may return fewer than this value. If @@ -983,9 +981,9 @@ class ListEntityTypesRequest(proto.Message): Supported fields: - - ``entity_type_id`` - - ``create_time`` - - ``update_time`` + - ``entity_type_id`` + - ``create_time`` + - ``update_time`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. 
""" @@ -1067,16 +1065,16 @@ class UpdateEntityTypeRequest(proto.Message): Updatable fields: - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` - - ``monitoring_config.snapshot_analysis.staleness_days`` - - ``monitoring_config.import_features_analysis.state`` - - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` - - ``monitoring_config.numerical_threshold_config.value`` - - ``monitoring_config.categorical_threshold_config.value`` - - ``offline_storage_ttl_days`` + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` + - ``monitoring_config.snapshot_analysis.staleness_days`` + - ``monitoring_config.import_features_analysis.state`` + - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` + - ``monitoring_config.numerical_threshold_config.value`` + - ``monitoring_config.categorical_threshold_config.value`` + - ``offline_storage_ttl_days`` """ entity_type: gca_entity_type.EntityType = proto.Field( @@ -1243,26 +1241,24 @@ class ListFeaturesRequest(proto.Message): Lists the Features that match the filter expression. The following filters are supported: - - ``value_type``: Supports = and != comparisons. - - ``create_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``update_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. + - ``value_type``: Supports = and != comparisons. + - ``create_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``update_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``labels``: Supports key-value equality as well as key + presence. Examples: - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> EntityTypes created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label with - 'env' as the key. + - ``value_type = DOUBLE`` --> Features whose type is DOUBLE. + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> Features + having both (active: yes) and (env: prod) labels. + - ``labels.env: *`` --> Any Feature which has a label with + 'env' as the key. page_size (int): The maximum number of Features to return. The service may return fewer than this value. If @@ -1286,11 +1282,10 @@ class ListFeaturesRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported fields: - - ``feature_id`` - - ``value_type`` (Not supported for FeatureRegistry - Feature) - - ``create_time`` - - ``update_time`` + - ``feature_id`` + - ``value_type`` (Not supported for FeatureRegistry Feature) + - ``create_time`` + - ``update_time`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. latest_stats_count (int): @@ -1385,14 +1380,14 @@ class SearchFeaturesRequest(proto.Message): FIELD. 
The QUERY and the FIELD are converted to a sequence of words (i.e. tokens) for comparison. This is done by: - - Removing leading/trailing whitespace and tokenizing the - search value. Characters that are not one of alphanumeric - ``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are - treated as delimiters for tokens. ``*`` is treated as a - wildcard that matches characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. + - Removing leading/trailing whitespace and tokenizing the + search value. Characters that are not one of alphanumeric + ``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are + treated as delimiters for tokens. ``*`` is treated as a + wildcard that matches characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. A QUERY must be either a singular token or a phrase. A phrase is one or multiple words enclosed in double quotation @@ -1402,47 +1397,45 @@ class SearchFeaturesRequest(proto.Message): Supported FIELDs for field-restricted queries: - - ``feature_id`` - - ``description`` - - ``entity_type_id`` + - ``feature_id`` + - ``description`` + - ``entity_type_id`` Examples: - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (eg. ``foo``, - ``foofeature``, ``barfoo``). - - ``feature_id: foo*feature`` --> Matches a Feature with ID - containing the substring ``foo*feature`` (eg. - ``foobarfeature``). - - ``feature_id: foo AND description: bar`` --> Matches a - Feature with ID containing the substring ``foo`` and - description containing the substring ``bar``. + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature with ID + containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches a + Feature with ID containing the substring ``foo`` and + description containing the substring ``bar``. Besides field queries, the following exact-match filters are supported. The exact-match filters do not support wildcards. Unlike field-restricted queries, exact-match filters are case-sensitive. - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. - - ``labels``: Supports key-value equality as well as key - presence. - - ``featurestore_id``: Supports = comparisons. + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as key + presence. + - ``featurestore_id``: Supports = comparisons. Examples: - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar`` - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label with - ``env`` as the key. 
+ - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> Features + having both (active: yes) and (env: prod) labels. + - ``labels.env: *`` --> Any Feature which has a label with + ``env`` as the key. page_size (int): The maximum number of Features to return. The service may return fewer than this value. If @@ -1488,11 +1481,11 @@ class SearchFeaturesResponse(proto.Message): Fields returned: - - ``name`` - - ``description`` - - ``labels`` - - ``create_time`` - - ``update_time`` + - ``name`` + - ``description`` + - ``labels`` + - ``create_time`` + - ``update_time`` next_page_token (str): A token, which can be sent as [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1.SearchFeaturesRequest.page_token] @@ -1538,12 +1531,12 @@ class UpdateFeatureRequest(proto.Message): Updatable fields: - - ``description`` - - ``labels`` - - ``disable_monitoring`` (Not supported for - FeatureRegistryService Feature) - - ``point_of_contact`` (Not supported for - FeaturestoreService FeatureStore) + - ``description`` + - ``labels`` + - ``disable_monitoring`` (Not supported for + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) """ feature: gca_feature.Feature = proto.Field( @@ -1627,10 +1620,10 @@ class ImportFeatureValuesOperationMetadata(proto.Message): The number of rows in input source that weren't imported due to either - - Not having any featureValues. - - Having a null entityId. - - Having a null timestamp. - - Not being parsable (applicable for CSV sources). + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). 
timestamp_outside_retention_rows_count (int): The number rows that weren't ingested due to having timestamps outside the retention diff --git a/google/cloud/aiplatform_v1/types/gen_ai_cache_service.py b/google/cloud/aiplatform_v1/types/gen_ai_cache_service.py index ced9f1da16..3c702e7173 100644 --- a/google/cloud/aiplatform_v1/types/gen_ai_cache_service.py +++ b/google/cloud/aiplatform_v1/types/gen_ai_cache_service.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - cached_content as gca_cached_content, -) +from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content from google.protobuf import field_mask_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py index 915f56d6d9..e5eaee7b18 100644 --- a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py +++ b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import job_state from google.cloud.aiplatform_v1.types import study from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/index.py b/google/cloud/aiplatform_v1/types/index.py index 8c0d8c6192..3c306c4428 100644 --- a/google/cloud/aiplatform_v1/types/index.py +++ b/google/cloud/aiplatform_v1/types/index.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import deployed_index_ref -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/index_endpoint.py b/google/cloud/aiplatform_v1/types/index_endpoint.py index bf0b130cef..f791cb5484 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import service_networking from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/index_endpoint_service.py b/google/cloud/aiplatform_v1/types/index_endpoint_service.py index c657a1aa00..b5e81434ba 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint_service.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - index_endpoint as gca_index_endpoint, -) +from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -119,25 +117,25 @@ class ListIndexEndpointsRequest(proto.Message): request. For field names both snake_case and camelCase are supported. 
- - ``index_endpoint`` supports = and !=. ``index_endpoint`` - represents the IndexEndpoint ID, ie. the last segment of - the IndexEndpoint's - [resourcename][google.cloud.aiplatform.v1.IndexEndpoint.name]. - - ``display_name`` supports =, != and regex() (uses - `re2 <https://github.com/google/re2/wiki/Syntax>`__ - syntax) - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality - ``labels.key:* or labels:key - key existence A key including a space must be quoted.``\ labels."a - key"`. + - ``index_endpoint`` supports = and !=. ``index_endpoint`` + represents the IndexEndpoint ID, i.e. the last segment of + the IndexEndpoint's + [resourcename][google.cloud.aiplatform.v1.IndexEndpoint.name]. + - ``display_name`` supports =, != and regex() (uses + `re2 <https://github.com/google/re2/wiki/Syntax>`__ + syntax) + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality + ``labels.key:* or labels:key - key existence A key including a space must be quoted.``\ labels."a + key"\`. Some examples: - - ``index_endpoint="1"`` - - ``display_name="myDisplayName"`` - - \`regex(display_name, "^A") -> The display name starts - with an A. - - ``labels.myKey="myValue"`` + - ``index_endpoint="1"`` + - ``display_name="myDisplayName"`` + - \`regex(display_name, "^A") -> The display name starts + with an A. + - ``labels.myKey="myValue"`` page_size (int): Optional. The standard list page size. page_token (str): diff --git a/google/cloud/aiplatform_v1/types/index_service.py b/google/cloud/aiplatform_v1/types/index_service.py index 15b3774a0b..e252eab49b 100644 --- a/google/cloud/aiplatform_v1/types/index_service.py +++ b/google/cloud/aiplatform_v1/types/index_service.py @@ -272,8 +272,8 @@ class UpsertDatapointsRequest(proto.Message): Updatable fields: - - Use ``all_restricts`` to update both restricts and - numeric_restricts. + - Use ``all_restricts`` to update both restricts and + numeric_restricts. """ index: str = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/io.py b/google/cloud/aiplatform_v1/types/io.py index d6b4ba2a03..07d89f0bea 100644 --- a/google/cloud/aiplatform_v1/types/io.py +++ b/google/cloud/aiplatform_v1/types/io.py @@ -117,8 +117,8 @@ class BigQuerySource(proto.Message): Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: - - BigQuery path. For example: - ``bq://projectId.bqDatasetId.bqTableId``. + - BigQuery path. For example: + ``bq://projectId.bqDatasetId.bqTableId``. """ input_uri: str = proto.Field( @@ -141,9 +141,9 @@ class BigQueryDestination(proto.Message): Accepted forms: - - BigQuery path. For example: ``bq://projectId`` or - ``bq://projectId.bqDatasetId`` or - ``bq://projectId.bqDatasetId.bqTableId``. + - BigQuery path. For example: ``bq://projectId`` or + ``bq://projectId.bqDatasetId`` or + ``bq://projectId.bqDatasetId.bqTableId``. """ output_uri: str = proto.Field( @@ -191,11 +191,11 @@ class ContainerRegistryDestination(proto.Message): Google Container Registry and Artifact Registry are supported now. Accepted forms: - - Google Container Registry path. For example: - ``gcr.io/projectId/imageName:tag``. + - Google Container Registry path. For example: + ``gcr.io/projectId/imageName:tag``. - - Artifact Registry path. For example: - ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. + - Artifact Registry path. For example: + ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. If a tag is not specified, "latest" will be used as the default tag.
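A short sketch (project, dataset, and table names are placeholders, not part of the patch) of the accepted BigQuery URI forms described in the io.py hunk above:

from google.cloud import aiplatform_v1

# A source must name a full table path.
source = aiplatform_v1.BigQuerySource(
    input_uri="bq://my-project.my_dataset.my_table"
)
# A destination accepts project-, dataset-, or table-level paths.
destination = aiplatform_v1.BigQueryDestination(
    output_uri="bq://my-project.my_dataset"
)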
diff --git a/google/cloud/aiplatform_v1/types/job_service.py b/google/cloud/aiplatform_v1/types/job_service.py index 056dfdba75..1cb3b7f5a2 100644 --- a/google/cloud/aiplatform_v1/types/job_service.py +++ b/google/cloud/aiplatform_v1/types/job_service.py @@ -23,9 +23,7 @@ batch_prediction_job as gca_batch_prediction_job, ) from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import ( - data_labeling_job as gca_data_labeling_job, -) +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job from google.cloud.aiplatform_v1.types import ( hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) @@ -143,24 +141,24 @@ class ListCustomJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -309,24 +307,24 @@ class ListDataLabelingJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. 
+ - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -490,24 +488,24 @@ class ListHyperparameterTuningJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -663,24 +661,24 @@ class ListNasJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. 
+ - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -912,26 +910,25 @@ class ListBatchPredictionJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``model_display_name`` supports ``=``, ``!=`` - comparisons. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``model_display_name`` supports ``=``, ``!=`` comparisons. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -1223,24 +1220,24 @@ class ListModelDeploymentMonitoringJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. 
+ - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -1325,17 +1322,17 @@ class UpdateModelDeploymentMonitoringJobRequest(proto.Message): Updatable fields: - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` """ model_deployment_monitoring_job: ( diff --git a/google/cloud/aiplatform_v1/types/llm_utility_service.py b/google/cloud/aiplatform_v1/types/llm_utility_service.py index b45160534c..5109a57d22 100644 --- a/google/cloud/aiplatform_v1/types/llm_utility_service.py +++ b/google/cloud/aiplatform_v1/types/llm_utility_service.py @@ -49,7 +49,7 @@ class ComputeTokensRequest(proto.Message): model (str): Optional. The name of the publisher model requested to serve the prediction. Format: - projects/{project}/locations/{location}/publishers/\ */models/* + projects/{project}/locations/{location}/publishers/*/models/* contents (MutableSequence[google.cloud.aiplatform_v1.types.Content]): Optional. Input content. """ diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py index 3b47081441..e6b28ce41c 100644 --- a/google/cloud/aiplatform_v1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1/types/machine_resources.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - accelerator_type as gca_accelerator_type, -) +from google.cloud.aiplatform_v1.types import accelerator_type as gca_accelerator_type from google.cloud.aiplatform_v1.types import ( reservation_affinity as gca_reservation_affinity, ) @@ -399,9 +397,9 @@ class AutoscalingMetricSpec(proto.Message): metric_name (str): Required. The resource metric name. 
Supported metrics: - - For Online Prediction: - - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` - - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` + - For Online Prediction: + - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` + - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` target (int): The target resource utilization in percentage (1% - 100%) for the given metric; once the real diff --git a/google/cloud/aiplatform_v1/types/metadata_service.py b/google/cloud/aiplatform_v1/types/metadata_service.py index 7fa1bda618..4726b19685 100644 --- a/google/cloud/aiplatform_v1/types/metadata_service.py +++ b/google/cloud/aiplatform_v1/types/metadata_service.py @@ -23,12 +23,8 @@ from google.cloud.aiplatform_v1.types import context as gca_context from google.cloud.aiplatform_v1.types import event from google.cloud.aiplatform_v1.types import execution as gca_execution -from google.cloud.aiplatform_v1.types import ( - metadata_schema as gca_metadata_schema, -) -from google.cloud.aiplatform_v1.types import ( - metadata_store as gca_metadata_store, -) +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store from google.cloud.aiplatform_v1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -351,25 +347,25 @@ class ListArtifactsRequest(proto.Message): define filter query is based on https://google.aip.dev/160. The supported set of filters include the following: - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``uri``, ``state``, - ``schema_title``, ``create_time``, and ``update_time``. - Time fields, such as ``create_time`` and ``update_time``, - require values specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"`` - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0`` In case the - field name contains special characters (such as colon), - one can embed it inside double quote. For example: - ``metadata."field:1".number_value = 10.0`` - - **Context based filtering**: To filter Artifacts based on - the contexts to which they belong, use the function - operator with the full resource name - ``in_context()``. For example: - ``in_context("projects//locations//metadataStores//contexts/")`` + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` In case the field + name contains special characters (such as colon), one can + embed it inside double quote. For example: + ``metadata."field:1".number_value = 10.0`` + - **Context based filtering**: To filter Artifacts based on + the contexts to which they belong, use the function + operator with the full resource name + ``in_context()``. 
For example: + ``in_context("projects//locations//metadataStores//contexts/")`` Each of the above supported filter types can be combined together using logical operators (``AND`` & ``OR``). Maximum @@ -655,32 +651,31 @@ class ListContextsRequest(proto.Message): define filter query is based on https://google.aip.dev/160. Following are the supported set of filters: - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``schema_title``, - ``create_time``, and ``update_time``. Time fields, such - as ``create_time`` and ``update_time``, require values - specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"``. - - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0``. In case the - field name contains special characters (such as colon), - one can embed it inside double quote. For example: - ``metadata."field:1".number_value = 10.0`` - - - **Parent Child filtering**: To filter Contexts based on - parent-child relationship use the HAS operator as - follows: - - :: - - parent_contexts: - "projects//locations//metadataStores//contexts/" - child_contexts: - "projects//locations//metadataStores//contexts/" + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such as + ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. + + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0``. In case the + field name contains special characters (such as colon), + one can embed it inside double quote. For example: + ``metadata."field:1".number_value = 10.0`` + + - **Parent Child filtering**: To filter Contexts based on + parent-child relationship use the HAS operator as follows: + + :: + + parent_contexts: + "projects//locations//metadataStores//contexts/" + child_contexts: + "projects//locations//metadataStores//contexts/" Each of the above supported filters can be combined together using logical operators (``AND`` & ``OR``). Maximum nested @@ -1101,25 +1096,25 @@ class ListExecutionsRequest(proto.Message): https://google.aip.dev/160. Following are the supported set of filters: - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``state``, ``schema_title``, - ``create_time``, and ``update_time``. Time fields, such - as ``create_time`` and ``update_time``, require values - specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"``. - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..`` For example: - ``metadata.field_1.number_value = 10.0`` In case the - field name contains special characters (such as colon), - one can embed it inside double quote. For example: - ``metadata."field:1".number_value = 10.0`` - - **Context based filtering**: To filter Executions based - on the contexts to which they belong use the function - operator with the full resource name: - ``in_context()``. 
For example: - ``in_context("projects//locations//metadataStores//contexts/")`` + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``state``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such as + ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..`` For example: + ``metadata.field_1.number_value = 10.0`` In case the field + name contains special characters (such as colon), one can + embed it inside double quote. For example: + ``metadata."field:1".number_value = 10.0`` + - **Context based filtering**: To filter Executions based on + the contexts to which they belong use the function + operator with the full resource name: + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` Each of the above supported filters can be combined together using logical operators (``AND`` & ``OR``). Maximum nested @@ -1533,20 +1528,20 @@ class QueryArtifactLineageSubgraphRequest(proto.Message): https://google.aip.dev/160. The supported set of filters include the following: - - **Attribute filtering**: For example: - ``display_name = "test"`` Supported fields include: - ``name``, ``display_name``, ``uri``, ``state``, - ``schema_title``, ``create_time``, and ``update_time``. - Time fields, such as ``create_time`` and ``update_time``, - require values specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"`` - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0`` In case the - field name contains special characters (such as colon), - one can embed it inside double quote. For example: - ``metadata."field:1".number_value = 10.0`` + - **Attribute filtering**: For example: + ``display_name = "test"`` Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` In case the field + name contains special characters (such as colon), one can + embed it inside double quote. For example: + ``metadata."field:1".number_value = 10.0`` Each of the above supported filter types can be combined together using logical operators (``AND`` & ``OR``). 
Maximum diff --git a/google/cloud/aiplatform_v1/types/metadata_store.py b/google/cloud/aiplatform_v1/types/metadata_store.py index 70adb59de0..eed1cc747c 100644 --- a/google/cloud/aiplatform_v1/types/metadata_store.py +++ b/google/cloud/aiplatform_v1/types/metadata_store.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/migratable_resource.py b/google/cloud/aiplatform_v1/types/migratable_resource.py index d91dee99f3..72d369b3c4 100644 --- a/google/cloud/aiplatform_v1/types/migratable_resource.py +++ b/google/cloud/aiplatform_v1/types/migratable_resource.py @@ -80,10 +80,10 @@ class MlEngineModelVersion(proto.Message): The ml.googleapis.com endpoint that this model Version currently lives in. Example values: - - ml.googleapis.com - - us-centrall-ml.googleapis.com - - europe-west4-ml.googleapis.com - - asia-east1-ml.googleapis.com + - ml.googleapis.com + - us-centrall-ml.googleapis.com + - europe-west4-ml.googleapis.com + - asia-east1-ml.googleapis.com version (str): Full resource name of ml engine model Version. Format: ``projects/{project}/models/{model}/versions/{version}``. diff --git a/google/cloud/aiplatform_v1/types/migration_service.py b/google/cloud/aiplatform_v1/types/migration_service.py index 3e75344527..9d19366134 100644 --- a/google/cloud/aiplatform_v1/types/migration_service.py +++ b/google/cloud/aiplatform_v1/types/migration_service.py @@ -60,23 +60,22 @@ class SearchMigratableResourcesRequest(proto.Message): A filter for your search. You can use the following types of filters: - - Resource type filters. The following strings filter for a - specific type of - [MigratableResource][google.cloud.aiplatform.v1.MigratableResource]: - - - ``ml_engine_model_version:*`` - - ``automl_model:*`` - - ``automl_dataset:*`` - - ``data_labeling_dataset:*`` - - - "Migrated or not" filters. The following strings filter - for resources that either have or have not already been - migrated: - - - ``last_migrate_time:*`` filters for migrated - resources. - - ``NOT last_migrate_time:*`` filters for not yet - migrated resources. + - Resource type filters. The following strings filter for a + specific type of + [MigratableResource][google.cloud.aiplatform.v1.MigratableResource]: + + - ``ml_engine_model_version:*`` + - ``automl_model:*`` + - ``automl_dataset:*`` + - ``data_labeling_dataset:*`` + + - "Migrated or not" filters. The following strings filter + for resources that either have or have not already been + migrated: + + - ``last_migrate_time:*`` filters for migrated resources. + - ``NOT last_migrate_time:*`` filters for not yet migrated + resources. """ parent: str = proto.Field( @@ -200,13 +199,13 @@ class MigrateMlEngineModelVersionConfig(proto.Message): Required. The ml.googleapis.com endpoint that this model version should be migrated from. Example values: - - ml.googleapis.com + - ml.googleapis.com - - us-centrall-ml.googleapis.com + - us-centrall-ml.googleapis.com - - europe-west4-ml.googleapis.com + - europe-west4-ml.googleapis.com - - asia-east1-ml.googleapis.com + - asia-east1-ml.googleapis.com model_version (str): Required. Full resource name of ml engine model version. 
Format: diff --git a/google/cloud/aiplatform_v1/types/model.py b/google/cloud/aiplatform_v1/types/model.py index 3c65e62f64..60315bc23e 100644 --- a/google/cloud/aiplatform_v1/types/model.py +++ b/google/cloud/aiplatform_v1/types/model.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import deployed_model_ref -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import env_var from google.cloud.aiplatform_v1.types import explanation from google.protobuf import duration_pb2 # type: ignore @@ -161,31 +159,31 @@ class Model(proto.Message): The possible formats are: - - ``jsonl`` The JSON Lines format, where each instance is a - single line. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + - ``jsonl`` The JSON Lines format, where each instance is a + single line. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - ``csv`` The CSV format, where each instance is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + - ``csv`` The CSV format, where each instance is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - ``tf-record`` The TFRecord format, where each instance is - a single record in tfrecord syntax. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + - ``tf-record`` The TFRecord format, where each instance is + a single record in tfrecord syntax. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - ``tf-record-gzip`` Similar to ``tf-record``, but the file - is gzipped. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + - ``tf-record-gzip`` Similar to ``tf-record``, but the file + is gzipped. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - ``bigquery`` Each instance is a single row in BigQuery. - Uses - [BigQuerySource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.bigquery_source]. + - ``bigquery`` Each instance is a single row in BigQuery. + Uses + [BigQuerySource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.bigquery_source]. - - ``file-list`` Each line of the file is the location of an - instance to process, uses ``gcs_source`` field of the - [InputConfig][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig] - object. + - ``file-list`` Each line of the file is the location of an + instance to process, uses ``gcs_source`` field of the + [InputConfig][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig] + object. If this Model doesn't support any of these formats it means it cannot be used with a @@ -210,19 +208,19 @@ class Model(proto.Message): The possible formats are: - - ``jsonl`` The JSON Lines format, where each prediction is - a single line. Uses - [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. + - ``jsonl`` The JSON Lines format, where each prediction is + a single line. 
Uses + [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. - - ``csv`` The CSV format, where each prediction is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. + - ``csv`` The CSV format, where each prediction is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. - - ``bigquery`` Each prediction is a single row in a - BigQuery table, uses - [BigQueryDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.bigquery_destination] - . + - ``bigquery`` Each prediction is a single row in a BigQuery + table, uses + [BigQueryDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.bigquery_destination] + . If this Model doesn't support any of these formats it means it cannot be used with a @@ -366,23 +364,23 @@ class ExportFormat(proto.Message): Output only. The ID of the export format. The possible format IDs are: - - ``tflite`` Used for Android mobile devices. + - ``tflite`` Used for Android mobile devices. - - ``edgetpu-tflite`` Used for `Edge - TPU `__ devices. + - ``edgetpu-tflite`` Used for `Edge + TPU `__ devices. - - ``tf-saved-model`` A tensorflow model in SavedModel - format. + - ``tf-saved-model`` A tensorflow model in SavedModel + format. - - ``tf-js`` A - `TensorFlow.js `__ model - that can be used in the browser and in Node.js using - JavaScript. + - ``tf-js`` A + `TensorFlow.js `__ model + that can be used in the browser and in Node.js using + JavaScript. - - ``core-ml`` Used for iOS mobile devices. + - ``core-ml`` Used for iOS mobile devices. - - ``custom-trained`` A Model that was uploaded or trained - by custom code. + - ``custom-trained`` A Model that was uploaded or trained by + custom code. exportable_contents (MutableSequence[google.cloud.aiplatform_v1.types.Model.ExportFormat.ExportableContent]): Output only. The content of this Model that may be exported. @@ -1018,19 +1016,19 @@ class ModelContainerSpec(proto.Message): /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: - - ENDPOINT: The last segment (following ``endpoints/``)of - the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) + - ENDPOINT: The last segment (following ``endpoints/``)of + the Endpoint.name][] field of the Endpoint where this + Model has been deployed. (Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) health_route (str): Immutable. HTTP path on the container to send health checks to. 
Vertex AI intermittently sends GET requests to this path @@ -1051,22 +1049,22 @@ class ModelContainerSpec(proto.Message): /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: - - ENDPOINT: The last segment (following ``endpoints/``)of - the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) + - ENDPOINT: The last segment (following ``endpoints/``)of + the Endpoint.name][] field of the Endpoint where this + Model has been deployed. (Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) invoke_route_prefix (str): Immutable. Invoke route prefix for the custom container. - "/*" is the only supported value right now. By setting this + "/\*" is the only supported value right now. By setting this field, any non-root route on this model will be accessible with invoke http call eg: "/invoke/foo/bar", however the [PredictionService.Invoke] RPC is not supported yet. @@ -1324,7 +1322,7 @@ class ExecAction(proto.Message): the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, you need to + ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. """ diff --git a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py index 3ca2a22825..f725cbdb3f 100644 --- a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import feature_monitoring_stats from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import job_state diff --git a/google/cloud/aiplatform_v1/types/model_evaluation.py b/google/cloud/aiplatform_v1/types/model_evaluation.py index 89d0fa0898..80485ab2c3 100644 --- a/google/cloud/aiplatform_v1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1/types/model_evaluation.py @@ -116,8 +116,8 @@ class ModelEvaluationExplanationSpec(proto.Message): For AutoML Image Classification models, possible values are: - - ``image-integrated-gradients`` - - ``image-xrai`` + - ``image-integrated-gradients`` + - ``image-xrai`` explanation_spec (google.cloud.aiplatform_v1.types.ExplanationSpec): Explanation spec details. 
""" diff --git a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py index 2bb781c866..6ce7041641 100644 --- a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py +++ b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py @@ -75,13 +75,13 @@ class Slice(proto.Message): Output only. The dimension of the slice. Well-known dimensions are: - - ``annotationSpec``: This slice is on the test data that - has either ground truth or prediction with - [AnnotationSpec.display_name][google.cloud.aiplatform.v1.AnnotationSpec.display_name] - equals to - [value][google.cloud.aiplatform.v1.ModelEvaluationSlice.Slice.value]. - - ``slice``: This slice is a user customized slice defined - by its SliceSpec. + - ``annotationSpec``: This slice is on the test data that + has either ground truth or prediction with + [AnnotationSpec.display_name][google.cloud.aiplatform.v1.AnnotationSpec.display_name] + equals to + [value][google.cloud.aiplatform.v1.ModelEvaluationSlice.Slice.value]. + - ``slice``: This slice is a user customized slice defined + by its SliceSpec. value (str): Output only. The value of the dimension in this slice. diff --git a/google/cloud/aiplatform_v1/types/model_service.py b/google/cloud/aiplatform_v1/types/model_service.py index 92176fd9e7..32ba35a7dc 100644 --- a/google/cloud/aiplatform_v1/types/model_service.py +++ b/google/cloud/aiplatform_v1/types/model_service.py @@ -19,16 +19,12 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import evaluated_annotation from google.cloud.aiplatform_v1.types import explanation from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import ( - model_evaluation as gca_model_evaluation, -) +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -214,25 +210,25 @@ class ListModelsRequest(proto.Message): An expression for filtering the results of the request. For field names both snake_case and camelCase are supported. - - ``model`` supports = and !=. ``model`` represents the - Model ID, i.e. the last segment of the Model's [resource - name][google.cloud.aiplatform.v1.Model.name]. - - ``display_name`` supports = and != - - ``labels`` supports general map functions that is: + - ``model`` supports = and !=. ``model`` represents the + Model ID, i.e. the last segment of the Model's [resource + name][google.cloud.aiplatform.v1.Model.name]. + - ``display_name`` supports = and != + - ``labels`` supports general map functions that is: - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. 
- - ``base_model_name`` only supports = + - ``base_model_name`` only supports = Some examples: - - ``model=1234`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - - ``baseModelName="text-bison"`` + - ``model=1234`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + - ``baseModelName="text-bison"`` page_size (int): The standard list page size. page_token (str): @@ -248,9 +244,9 @@ class ListModelsRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` Example: ``display_name, create_time desc``. """ @@ -330,16 +326,16 @@ class ListModelVersionsRequest(proto.Message): An expression for filtering the results of the request. For field names both snake_case and camelCase are supported. - - ``labels`` supports general map functions that is: + - ``labels`` supports general map functions that is: - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. Some examples: - - ``labels.myKey="myValue"`` + - ``labels.myKey="myValue"`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. order_by (str): @@ -347,8 +343,8 @@ class ListModelVersionsRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported fields: - - ``create_time`` - - ``update_time`` + - ``create_time`` + - ``update_time`` Example: ``update_time asc, create_time desc``. """ @@ -1144,7 +1140,7 @@ class ListModelEvaluationSlicesRequest(proto.Message): filter (str): The standard list filter. - - ``slice.dimension`` - for =. + - ``slice.dimension`` - for =. page_size (int): The standard list page size. 
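The ``ListModelsRequest`` filter and ``order_by`` grammar above, exercised end to end. A sketch, assuming the standard generated ``ModelServiceClient``; the parent is hypothetical, and the filter and ordering values follow the documented examples::

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.ModelServiceClient()
    request = aiplatform_v1.ListModelsRequest(
        parent="projects/my-project/locations/us-central1",
        filter='labels.myKey="myValue"',
        order_by="display_name, create_time desc",
    )
    # The returned pager hides page_size/page_token handling.
    for model in client.list_models(request=request):
        print(model.name)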
page_token (str): diff --git a/google/cloud/aiplatform_v1/types/nas_job.py b/google/cloud/aiplatform_v1/types/nas_job.py index 07a7a7f7e9..13fa3b82cc 100644 --- a/google/cloud/aiplatform_v1/types/nas_job.py +++ b/google/cloud/aiplatform_v1/types/nas_job.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import job_state from google.cloud.aiplatform_v1.types import study from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/notebook_execution_job.py b/google/cloud/aiplatform_v1/types/notebook_execution_job.py index 13f5e8b49d..fd39bd3c92 100644 --- a/google/cloud/aiplatform_v1/types/notebook_execution_job.py +++ b/google/cloud/aiplatform_v1/types/notebook_execution_job.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import job_state as gca_job_state from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import network_spec as gca_network_spec diff --git a/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py b/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py index 8ba861911a..f1fbb5595e 100644 --- a/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py +++ b/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py @@ -42,7 +42,7 @@ class NotebookIdleShutdownConfig(proto.Message): - - 60. + 60. idle_shutdown_disabled (bool): Whether Idle Shutdown is disabled in this NotebookRuntimeTemplate. diff --git a/google/cloud/aiplatform_v1/types/notebook_runtime.py b/google/cloud/aiplatform_v1/types/notebook_runtime.py index a4bb41a181..8f030fb1bc 100644 --- a/google/cloud/aiplatform_v1/types/notebook_runtime.py +++ b/google/cloud/aiplatform_v1/types/notebook_runtime.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import network_spec as gca_network_spec from google.cloud.aiplatform_v1.types import notebook_euc_config @@ -319,12 +317,12 @@ class NotebookRuntime(proto.Message): "aiplatform.googleapis.com/" and are immutable. Following system labels exist for NotebookRuntime: - - "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": - output only, its value is the Compute Engine instance id. - - "aiplatform.googleapis.com/colab_enterprise_entry_service": - its value is either "bigquery" or "vertex"; if absent, it - should be "vertex". This is to describe the entry - service, either BigQuery or Vertex. + - "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": + output only, its value is the Compute Engine instance id. + - "aiplatform.googleapis.com/colab_enterprise_entry_service": + its value is either "bigquery" or "vertex"; if absent, it + should be "vertex". This is to describe the entry service, + either BigQuery or Vertex. 
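The system labels documented above are ordinary entries in ``labels``. A sketch of reading them back, assuming a ``NotebookServiceClient`` and a hypothetical runtime name::

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.NotebookServiceClient()
    runtime = client.get_notebook_runtime(
        name="projects/my-project/locations/us-central1/notebookRuntimes/123"
    )
    gce_instance_id = runtime.labels.get(
        "aiplatform.googleapis.com/notebook_runtime_gce_instance_id"
    )
    # Per the docstring, an absent value should be read as "vertex".
    entry_service = runtime.labels.get(
        "aiplatform.googleapis.com/colab_enterprise_entry_service", "vertex"
    )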
expiration_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this NotebookRuntime will be expired: diff --git a/google/cloud/aiplatform_v1/types/notebook_service.py b/google/cloud/aiplatform_v1/types/notebook_service.py index 99c9395d5b..6cf9ed2394 100644 --- a/google/cloud/aiplatform_v1/types/notebook_service.py +++ b/google/cloud/aiplatform_v1/types/notebook_service.py @@ -22,9 +22,7 @@ from google.cloud.aiplatform_v1.types import ( notebook_execution_job as gca_notebook_execution_job, ) -from google.cloud.aiplatform_v1.types import ( - notebook_runtime as gca_notebook_runtime, -) +from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime from google.cloud.aiplatform_v1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -165,32 +163,32 @@ class ListNotebookRuntimeTemplatesRequest(proto.Message): request. For field names both snake_case and camelCase are supported. - - ``notebookRuntimeTemplate`` supports = and !=. - ``notebookRuntimeTemplate`` represents the - NotebookRuntimeTemplate ID, i.e. the last segment of the - NotebookRuntimeTemplate's [resource name] - [google.cloud.aiplatform.v1.NotebookRuntimeTemplate.name]. - - ``display_name`` supports = and != - - ``labels`` supports general map functions that is: + - ``notebookRuntimeTemplate`` supports = and !=. + ``notebookRuntimeTemplate`` represents the + NotebookRuntimeTemplate ID, i.e. the last segment of the + NotebookRuntimeTemplate's [resource name] + [google.cloud.aiplatform.v1.NotebookRuntimeTemplate.name]. + - ``display_name`` supports = and != + - ``labels`` supports general map functions that is: - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. - - ``notebookRuntimeType`` supports = and !=. - notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. - - ``machineType`` supports = and !=. - - ``acceleratorType`` supports = and !=. + - ``notebookRuntimeType`` supports = and !=. + notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. + - ``machineType`` supports = and !=. + - ``acceleratorType`` supports = and !=. Some examples: - - ``notebookRuntimeTemplate=notebookRuntimeTemplate123`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - - ``notebookRuntimeType=USER_DEFINED`` - - ``machineType=e2-standard-4`` - - ``acceleratorType=NVIDIA_TESLA_T4`` + - ``notebookRuntimeTemplate=notebookRuntimeTemplate123`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + - ``notebookRuntimeType=USER_DEFINED`` + - ``machineType=e2-standard-4`` + - ``acceleratorType=NVIDIA_TESLA_T4`` page_size (int): Optional. The standard list page size. page_token (str): @@ -208,9 +206,9 @@ class ListNotebookRuntimeTemplatesRequest(proto.Message): sorted in ascending order. Use "desc" after a field name for descending. Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` Example: ``display_name, create_time desc``. 
""" @@ -305,7 +303,7 @@ class UpdateNotebookRuntimeTemplateRequest(proto.Message): Input format: ``{paths: "${updated_filed}"}`` Updatable fields: - - ``encryption_spec.kms_key_name`` + - ``encryption_spec.kms_key_name`` """ notebook_runtime_template: gca_notebook_runtime.NotebookRuntimeTemplate = ( @@ -421,46 +419,45 @@ class ListNotebookRuntimesRequest(proto.Message): request. For field names both snake_case and camelCase are supported. - - ``notebookRuntime`` supports = and !=. - ``notebookRuntime`` represents the NotebookRuntime ID, - i.e. the last segment of the NotebookRuntime's [resource - name] [google.cloud.aiplatform.v1.NotebookRuntime.name]. - - ``displayName`` supports = and != and regex. - - ``notebookRuntimeTemplate`` supports = and !=. - ``notebookRuntimeTemplate`` represents the - NotebookRuntimeTemplate ID, i.e. the last segment of the - NotebookRuntimeTemplate's [resource name] - [google.cloud.aiplatform.v1.NotebookRuntimeTemplate.name]. - - ``healthState`` supports = and !=. healthState enum: - [HEALTHY, UNHEALTHY, HEALTH_STATE_UNSPECIFIED]. - - ``runtimeState`` supports = and !=. runtimeState enum: - [RUNTIME_STATE_UNSPECIFIED, RUNNING, BEING_STARTED, - BEING_STOPPED, STOPPED, BEING_UPGRADED, ERROR, INVALID]. - - ``runtimeUser`` supports = and !=. - - API version is UI only: ``uiState`` supports = and !=. - uiState enum: [UI_RESOURCE_STATE_UNSPECIFIED, - UI_RESOURCE_STATE_BEING_CREATED, - UI_RESOURCE_STATE_ACTIVE, - UI_RESOURCE_STATE_BEING_DELETED, - UI_RESOURCE_STATE_CREATION_FAILED]. - - ``notebookRuntimeType`` supports = and !=. - notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. - - ``machineType`` supports = and !=. - - ``acceleratorType`` supports = and !=. + - ``notebookRuntime`` supports = and !=. ``notebookRuntime`` + represents the NotebookRuntime ID, i.e. the last segment + of the NotebookRuntime's [resource name] + [google.cloud.aiplatform.v1.NotebookRuntime.name]. + - ``displayName`` supports = and != and regex. + - ``notebookRuntimeTemplate`` supports = and !=. + ``notebookRuntimeTemplate`` represents the + NotebookRuntimeTemplate ID, i.e. the last segment of the + NotebookRuntimeTemplate's [resource name] + [google.cloud.aiplatform.v1.NotebookRuntimeTemplate.name]. + - ``healthState`` supports = and !=. healthState enum: + [HEALTHY, UNHEALTHY, HEALTH_STATE_UNSPECIFIED]. + - ``runtimeState`` supports = and !=. runtimeState enum: + [RUNTIME_STATE_UNSPECIFIED, RUNNING, BEING_STARTED, + BEING_STOPPED, STOPPED, BEING_UPGRADED, ERROR, INVALID]. + - ``runtimeUser`` supports = and !=. + - API version is UI only: ``uiState`` supports = and !=. + uiState enum: [UI_RESOURCE_STATE_UNSPECIFIED, + UI_RESOURCE_STATE_BEING_CREATED, UI_RESOURCE_STATE_ACTIVE, + UI_RESOURCE_STATE_BEING_DELETED, + UI_RESOURCE_STATE_CREATION_FAILED]. + - ``notebookRuntimeType`` supports = and !=. + notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. + - ``machineType`` supports = and !=. + - ``acceleratorType`` supports = and !=. 
Some examples: - - ``notebookRuntime="notebookRuntime123"`` - - ``displayName="myDisplayName"`` and - ``displayName=~"myDisplayNameRegex"`` - - ``notebookRuntimeTemplate="notebookRuntimeTemplate321"`` - - ``healthState=HEALTHY`` - - ``runtimeState=RUNNING`` - - ``runtimeUser="test@google.com"`` - - ``uiState=UI_RESOURCE_STATE_BEING_DELETED`` - - ``notebookRuntimeType=USER_DEFINED`` - - ``machineType=e2-standard-4`` - - ``acceleratorType=NVIDIA_TESLA_T4`` + - ``notebookRuntime="notebookRuntime123"`` + - ``displayName="myDisplayName"`` and + ``displayName=~"myDisplayNameRegex"`` + - ``notebookRuntimeTemplate="notebookRuntimeTemplate321"`` + - ``healthState=HEALTHY`` + - ``runtimeState=RUNNING`` + - ``runtimeUser="test@google.com"`` + - ``uiState=UI_RESOURCE_STATE_BEING_DELETED`` + - ``notebookRuntimeType=USER_DEFINED`` + - ``machineType=e2-standard-4`` + - ``acceleratorType=NVIDIA_TESLA_T4`` page_size (int): Optional. The standard list page size. page_token (str): @@ -478,9 +475,9 @@ class ListNotebookRuntimesRequest(proto.Message): sorted in ascending order. Use "desc" after a field name for descending. Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` Example: ``display_name, create_time desc``. """ @@ -800,18 +797,18 @@ class ListNotebookExecutionJobsRequest(proto.Message): request. For field names both snake_case and camelCase are supported. - - ``notebookExecutionJob`` supports = and !=. - ``notebookExecutionJob`` represents the - NotebookExecutionJob ID. - - ``displayName`` supports = and != and regex. - - ``schedule`` supports = and != and regex. + - ``notebookExecutionJob`` supports = and !=. + ``notebookExecutionJob`` represents the + NotebookExecutionJob ID. + - ``displayName`` supports = and != and regex. + - ``schedule`` supports = and != and regex. Some examples: - - ``notebookExecutionJob="123"`` - - ``notebookExecutionJob="my-execution-job"`` - - ``displayName="myDisplayName"`` and - ``displayName=~"myDisplayNameRegex"`` + - ``notebookExecutionJob="123"`` + - ``notebookExecutionJob="my-execution-job"`` + - ``displayName="myDisplayName"`` and + ``displayName=~"myDisplayNameRegex"`` page_size (int): Optional. The standard list page size. page_token (str): @@ -826,9 +823,9 @@ class ListNotebookExecutionJobsRequest(proto.Message): sorted in ascending order. Use "desc" after a field name for descending. Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` Example: ``display_name, create_time desc``. 
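Putting the runtime filter fields above together. A sketch, assuming a ``NotebookServiceClient``; the parent is hypothetical, and the filter combines two of the documented examples with the AIP-160 ``AND`` operator::

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.NotebookServiceClient()
    request = aiplatform_v1.ListNotebookRuntimesRequest(
        parent="projects/my-project/locations/us-central1",
        filter="healthState=HEALTHY AND runtimeState=RUNNING",
        order_by="display_name, create_time desc",
    )
    for runtime in client.list_notebook_runtimes(request=request):
        print(runtime.display_name)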
view (google.cloud.aiplatform_v1.types.NotebookExecutionJobView): diff --git a/google/cloud/aiplatform_v1/types/persistent_resource.py b/google/cloud/aiplatform_v1/types/persistent_resource.py index f456405a19..1b5e2f1cd8 100644 --- a/google/cloud/aiplatform_v1/types/persistent_resource.py +++ b/google/cloud/aiplatform_v1/types/persistent_resource.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import service_networking from google.protobuf import timestamp_pb2 # type: ignore @@ -345,8 +343,8 @@ class ResourceRuntimeSpec(proto.Message): r"""Configuration for the runtime on a PersistentResource instance, including but not limited to: - - Service accounts used to run the workloads. - - Whether to make it a dedicated Ray Cluster. + - Service accounts used to run the workloads. + - Whether to make it a dedicated Ray Cluster. Attributes: service_account_spec (google.cloud.aiplatform_v1.types.ServiceAccountSpec): @@ -462,9 +460,9 @@ class ServiceAccountSpec(proto.Message): service_account (str): Optional. Required when all below conditions are met - - ``enable_custom_service_account`` is true; - - any runtime is specified via ``ResourceRuntimeSpec`` on - creation time, for example, Ray + - ``enable_custom_service_account`` is true; + - any runtime is specified via ``ResourceRuntimeSpec`` on + creation time, for example, Ray The users must have ``iam.serviceAccounts.actAs`` permission on this service account and then the specified runtime diff --git a/google/cloud/aiplatform_v1/types/pipeline_job.py b/google/cloud/aiplatform_v1/types/pipeline_job.py index 3c539888e2..4dcca2448d 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1/types/pipeline_job.py @@ -21,9 +21,7 @@ from google.cloud.aiplatform_v1.types import artifact from google.cloud.aiplatform_v1.types import context -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import execution as gca_execution from google.cloud.aiplatform_v1.types import pipeline_failure_policy from google.cloud.aiplatform_v1.types import pipeline_state @@ -92,8 +90,8 @@ class PipelineJob(proto.Message): Note there is some reserved label key for Vertex AI Pipelines. - - ``vertex-ai-pipelines-run-billing-id``, user set value - will get overrided. + - ``vertex-ai-pipelines-run-billing-id``, user set value + will get overrided. runtime_config (google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig): Runtime config of the pipeline. 
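The ``ServiceAccountSpec`` conditions above, expressed as a sketch; the service account is hypothetical, and per the docstring the caller must hold ``iam.serviceAccounts.actAs`` on it::

    from google.cloud import aiplatform_v1

    runtime_spec = aiplatform_v1.ResourceRuntimeSpec(
        service_account_spec=aiplatform_v1.ServiceAccountSpec(
            enable_custom_service_account=True,
            # Required once any runtime (for example, Ray) is specified
            # on the PersistentResource at creation time.
            service_account="my-sa@my-project.iam.gserviceaccount.com",
        )
    )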
encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): diff --git a/google/cloud/aiplatform_v1/types/pipeline_service.py b/google/cloud/aiplatform_v1/types/pipeline_service.py index 4281bc0b4b..ce8ab51284 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -21,9 +21,7 @@ from google.cloud.aiplatform_v1.types import operation from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import ( - training_pipeline as gca_training_pipeline, -) +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline from google.protobuf import field_mask_pb2 # type: ignore @@ -121,25 +119,25 @@ class ListTrainingPipelinesRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``training_task_definition`` ``=``, ``!=`` comparisons, - and ``:`` wildcard. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``training_task_definition`` ``=``, ``!=`` comparisons, + and ``:`` wildcard. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name:"my_pipeline_*"`` - - ``state!="PIPELINE_STATE_FAILED" OR display_name="my_pipeline"`` - - ``NOT display_name="my_pipeline"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``training_task_definition:"*automl_text_classification*"`` + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name:"my_pipeline_*"`` + - ``state!="PIPELINE_STATE_FAILED" OR display_name="my_pipeline"`` + - ``NOT display_name="my_pipeline"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``training_task_definition:"*automl_text_classification*"`` page_size (int): The standard list page size. page_token (str): @@ -304,28 +302,28 @@ class ListPipelineJobsRequest(proto.Message): Lists the PipelineJobs that match the filter expression. The following fields are supported: - - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. - - ``display_name``: Supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``pipeline_job_user_id``: Supports ``=``, ``!=`` - comparisons, and ``:`` wildcard. for example, can check - if pipeline's display_name contains *step* by doing - display_name:"*step*" - - ``state``: Supports ``=`` and ``!=`` comparisons. - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. - - ``template_uri``: Supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. 
- - ``template_metadata.version``: Supports ``=``, ``!=`` - comparisons, and ``:`` wildcard. + - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. + - ``display_name``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``pipeline_job_user_id``: Supports ``=``, ``!=`` + comparisons, and ``:`` wildcard. for example, can check if + pipeline's display_name contains *step* by doing + display_name:"*step*" + - ``state``: Supports ``=`` and ``!=`` comparisons. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality and key presence. + - ``template_uri``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``template_metadata.version``: Supports ``=``, ``!=`` + comparisons, and ``:`` wildcard. Filter expressions can be combined together using logical operators (``AND`` & ``OR``). For example: @@ -336,11 +334,11 @@ class ListPipelineJobsRequest(proto.Message): Examples: - - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` - PipelineJobs created or updated after 2020-05-18 00:00:00 - UTC. - - ``labels.env = "prod"`` PipelineJobs with label "env" set - to "prod". + - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` + PipelineJobs created or updated after 2020-05-18 00:00:00 + UTC. + - ``labels.env = "prod"`` PipelineJobs with label "env" set + to "prod". page_size (int): The standard list page size. page_token (str): @@ -362,10 +360,10 @@ class ListPipelineJobsRequest(proto.Message): default order is create time in descending order. Supported fields: - - ``create_time`` - - ``update_time`` - - ``end_time`` - - ``start_time`` + - ``create_time`` + - ``update_time`` + - ``end_time`` + - ``start_time`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. """ diff --git a/google/cloud/aiplatform_v1/types/prediction_service.py b/google/cloud/aiplatform_v1/types/prediction_service.py index 21035103fe..60176892c5 100644 --- a/google/cloud/aiplatform_v1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1/types/prediction_service.py @@ -603,11 +603,11 @@ class ExplainRequest(proto.Message): of the DeployedModel. Can be used for explaining prediction results with different configurations, such as: - - Explaining top-5 predictions results as opposed to top-1; - - Increasing path count or step count of the attribution - methods to reduce approximate errors; - - Using different baselines for explaining the prediction - results. + - Explaining top-5 predictions results as opposed to top-1; + - Increasing path count or step count of the attribution + methods to reduce approximate errors; + - Using different baselines for explaining the prediction + results. 
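The filter and ordering rules above translate directly into a list call; a sketch assuming placeholder project/location values and the conventional ``desc`` suffix for descending order:

```python
from google.cloud import aiplatform_v1

client = aiplatform_v1.PipelineServiceClient()
# The filter combines two supported fields with AND, mirroring the
# examples in the docstring above.
for job in client.list_pipeline_jobs(
    request=aiplatform_v1.ListPipelineJobsRequest(
        parent="projects/my-project/locations/us-central1",
        filter='state="PIPELINE_STATE_SUCCEEDED" AND labels.env = "prod"',
        order_by="create_time desc",
    )
):
    print(job.name)
```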
deployed_model_id (str): If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding diff --git a/google/cloud/aiplatform_v1/types/reasoning_engine.py b/google/cloud/aiplatform_v1/types/reasoning_engine.py index b8c8de23f9..b719284690 100644 --- a/google/cloud/aiplatform_v1/types/reasoning_engine.py +++ b/google/cloud/aiplatform_v1/types/reasoning_engine.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import env_var from google.cloud.aiplatform_v1.types import service_networking from google.protobuf import struct_pb2 # type: ignore @@ -150,13 +148,13 @@ class DeploymentSpec(proto.Message): 'memory' keys are supported. Defaults to {"cpu": "4", "memory": "4Gi"}. - - The only supported values for CPU are '1', '2', '4', '6' - and '8'. For more information, go to - https://cloud.google.com/run/docs/configuring/cpu. - - The only supported values for memory are '1Gi', '2Gi', - ... '32 Gi'. - - For required cpu on different memory values, go to - https://cloud.google.com/run/docs/configuring/memory-limits + - The only supported values for CPU are '1', '2', '4', '6' + and '8'. For more information, go to + https://cloud.google.com/run/docs/configuring/cpu. + - The only supported values for memory are '1Gi', '2Gi', ... + '32 Gi'. + - For required cpu on different memory values, go to + https://cloud.google.com/run/docs/configuring/memory-limits container_concurrency (int): Optional. Concurrency for each container and agent server. Recommended value: 2 \* cpu + 1. Defaults to 9. diff --git a/google/cloud/aiplatform_v1/types/reasoning_engine_service.py b/google/cloud/aiplatform_v1/types/reasoning_engine_service.py index 03cd623b0f..6b6685ea09 100644 --- a/google/cloud/aiplatform_v1/types/reasoning_engine_service.py +++ b/google/cloud/aiplatform_v1/types/reasoning_engine_service.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import operation -from google.cloud.aiplatform_v1.types import ( - reasoning_engine as gca_reasoning_engine, -) +from google.cloud.aiplatform_v1.types import reasoning_engine as gca_reasoning_engine from google.protobuf import field_mask_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/saved_query.py b/google/cloud/aiplatform_v1/types/saved_query.py index c807251d18..fa9727a5da 100644 --- a/google/cloud/aiplatform_v1/types/saved_query.py +++ b/google/cloud/aiplatform_v1/types/saved_query.py @@ -57,16 +57,16 @@ class SavedQuery(proto.Message): problem_type (str): Required. Problem type of the SavedQuery. Allowed values: - - IMAGE_CLASSIFICATION_SINGLE_LABEL - - IMAGE_CLASSIFICATION_MULTI_LABEL - - IMAGE_BOUNDING_POLY - - IMAGE_BOUNDING_BOX - - TEXT_CLASSIFICATION_SINGLE_LABEL - - TEXT_CLASSIFICATION_MULTI_LABEL - - TEXT_EXTRACTION - - TEXT_SENTIMENT - - VIDEO_CLASSIFICATION - - VIDEO_OBJECT_TRACKING + - IMAGE_CLASSIFICATION_SINGLE_LABEL + - IMAGE_CLASSIFICATION_MULTI_LABEL + - IMAGE_BOUNDING_POLY + - IMAGE_BOUNDING_BOX + - TEXT_CLASSIFICATION_SINGLE_LABEL + - TEXT_CLASSIFICATION_MULTI_LABEL + - TEXT_EXTRACTION + - TEXT_SENTIMENT + - VIDEO_CLASSIFICATION + - VIDEO_OBJECT_TRACKING annotation_spec_count (int): Output only. Number of AnnotationSpecs in the context of the SavedQuery. 
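A short sketch of the ``DeploymentSpec`` limits described above, assuming the nested message is addressed through ``ReasoningEngineSpec`` as elsewhere in these types; the chosen values are merely examples within the documented ranges:

```python
from google.cloud import aiplatform_v1

deployment_spec = aiplatform_v1.ReasoningEngineSpec.DeploymentSpec(
    # cpu must be one of '1', '2', '4', '6', '8'; memory '1Gi' up to '32Gi'.
    resource_limits={"cpu": "4", "memory": "8Gi"},
    container_concurrency=9,  # recommended 2 * cpu + 1 per the docstring
)
```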
diff --git a/google/cloud/aiplatform_v1/types/schedule_service.py b/google/cloud/aiplatform_v1/types/schedule_service.py index b37e17ce9e..cfa8414c73 100644 --- a/google/cloud/aiplatform_v1/types/schedule_service.py +++ b/google/cloud/aiplatform_v1/types/schedule_service.py @@ -91,24 +91,24 @@ class ListSchedulesRequest(proto.Message): Lists the Schedules that match the filter expression. The following fields are supported: - - ``display_name``: Supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state``: Supports ``=`` and ``!=`` comparisons. - - ``request``: Supports existence of the - check. (e.g. ``create_pipeline_job_request:*`` --> - Schedule has create_pipeline_job_request). - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``start_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, ``>=`` comparisons and ``:*`` existence check. - Values must be in RFC 3339 format. - - ``next_run_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. + - ``display_name``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state``: Supports ``=`` and ``!=`` comparisons. + - ``request``: Supports existence of the + check. (e.g. ``create_pipeline_job_request:*`` --> + Schedule has create_pipeline_job_request). + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``start_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, ``>=`` comparisons and ``:*`` existence check. + Values must be in RFC 3339 format. + - ``next_run_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. Filter expressions can be combined together using logical operators (``NOT``, ``AND`` & ``OR``). The syntax to define @@ -116,11 +116,11 @@ class ListSchedulesRequest(proto.Message): Examples: - - ``state="ACTIVE" AND display_name:"my_schedule_*"`` - - ``NOT display_name="my_schedule"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``end_time>"2021-05-18T00:00:00Z" OR NOT end_time:*`` - - ``create_pipeline_job_request:*`` + - ``state="ACTIVE" AND display_name:"my_schedule_*"`` + - ``NOT display_name="my_schedule"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``end_time>"2021-05-18T00:00:00Z" OR NOT end_time:*`` + - ``create_pipeline_job_request:*`` page_size (int): The standard list page size. Default to 100 if not specified. @@ -146,10 +146,10 @@ class ListSchedulesRequest(proto.Message): Supported fields: - - ``create_time`` - - ``start_time`` - - ``end_time`` - - ``next_run_time`` + - ``create_time`` + - ``start_time`` + - ``end_time`` + - ``next_run_time`` """ parent: str = proto.Field( @@ -273,9 +273,9 @@ class UpdateScheduleRequest(proto.Message): Required. The Schedule which replaces the resource on the server. The following restrictions will be applied: - - The scheduled request type cannot be changed. - - The non-empty fields cannot be unset. - - The output_only fields will be ignored if specified. + - The scheduled request type cannot be changed. + - The non-empty fields cannot be unset. + - The output_only fields will be ignored if specified. 
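Given those restrictions, an update call looks like the following sketch; ``display_name`` is used as an illustrative mutable field and the resource name is a placeholder:

```python
from google.cloud import aiplatform_v1
from google.protobuf import field_mask_pb2

client = aiplatform_v1.ScheduleServiceClient()
updated = client.update_schedule(
    schedule=aiplatform_v1.Schedule(
        name="projects/my-project/locations/us-central1/schedules/123",
        display_name="my_schedule",
    ),
    # Only the masked field is changed; output_only fields would be ignored.
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
)
```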
update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. See [google.protobuf.FieldMask][google.protobuf.FieldMask]. diff --git a/google/cloud/aiplatform_v1/types/specialist_pool_service.py b/google/cloud/aiplatform_v1/types/specialist_pool_service.py index b3dbcf6b76..b3e0533e83 100644 --- a/google/cloud/aiplatform_v1/types/specialist_pool_service.py +++ b/google/cloud/aiplatform_v1/types/specialist_pool_service.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import operation -from google.cloud.aiplatform_v1.types import ( - specialist_pool as gca_specialist_pool, -) +from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool from google.protobuf import field_mask_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/tensorboard.py b/google/cloud/aiplatform_v1/types/tensorboard.py index 692e17eb10..23dad29b79 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard.py +++ b/google/cloud/aiplatform_v1/types/tensorboard.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/tensorboard_experiment.py b/google/cloud/aiplatform_v1/types/tensorboard_experiment.py index 0a6270c045..dea6536f6d 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard_experiment.py +++ b/google/cloud/aiplatform_v1/types/tensorboard_experiment.py @@ -65,10 +65,10 @@ class TensorboardExperiment(proto.Message): ``aiplatform.googleapis.com/`` and are immutable. The following system labels exist for each Dataset: - - ``aiplatform.googleapis.com/dataset_metadata_schema``: - output only. Its value is the - [metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] - title. + - ``aiplatform.googleapis.com/dataset_metadata_schema``: + output only. Its value is the + [metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] + title. etag (str): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update diff --git a/google/cloud/aiplatform_v1/types/tensorboard_service.py b/google/cloud/aiplatform_v1/types/tensorboard_service.py index a34aec1070..bbcb49f0e2 100644 --- a/google/cloud/aiplatform_v1/types/tensorboard_service.py +++ b/google/cloud/aiplatform_v1/types/tensorboard_service.py @@ -25,9 +25,7 @@ from google.cloud.aiplatform_v1.types import ( tensorboard_experiment as gca_tensorboard_experiment, ) -from google.cloud.aiplatform_v1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1.types import ( tensorboard_time_series as gca_tensorboard_time_series, ) diff --git a/google/cloud/aiplatform_v1/types/tool.py b/google/cloud/aiplatform_v1/types/tool.py index 9dbb7e1891..35497bcddd 100644 --- a/google/cloud/aiplatform_v1/types/tool.py +++ b/google/cloud/aiplatform_v1/types/tool.py @@ -676,8 +676,17 @@ class GoogleMaps(proto.Message): r"""Tool to retrieve public maps data for grounding, powered by Google. + Attributes: + enable_widget (bool): + If true, include the widget context token in + the response. 
""" + enable_widget: bool = proto.Field( + proto.BOOL, + number=1, + ) + class EnterpriseWebSearch(proto.Message): r"""Tool to search public web data, powered by Vertex AI Search diff --git a/google/cloud/aiplatform_v1/types/training_pipeline.py b/google/cloud/aiplatform_v1/types/training_pipeline.py index 6b3a4d4d66..690c31494b 100644 --- a/google/cloud/aiplatform_v1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1/types/training_pipeline.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import pipeline_state @@ -302,19 +300,19 @@ class InputDataConfig(proto.Message): The Vertex AI environment variables representing Cloud Storage data URIs are represented in the Cloud Storage wildcard format to support sharded data. e.g.: - "gs://.../training-*.jsonl" + "gs://.../training-\*.jsonl" - - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for - tabular data + - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for + tabular data - - AIP_TRAINING_DATA_URI = - "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" + - AIP_TRAINING_DATA_URI = + "gcs_destination/dataset---/training-\*.${AIP_DATA_FORMAT}" - - AIP_VALIDATION_DATA_URI = - "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" + - AIP_VALIDATION_DATA_URI = + "gcs_destination/dataset---/validation-\*.${AIP_DATA_FORMAT}" - - AIP_TEST_DATA_URI = - "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}". + - AIP_TEST_DATA_URI = + "gcs_destination/dataset---/test-\*.${AIP_DATA_FORMAT}". This field is a member of `oneof`_ ``destination``. bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): @@ -330,16 +328,16 @@ class InputDataConfig(proto.Message): dataset three tables are created, ``training``, ``validation`` and ``test``. - - AIP_DATA_FORMAT = "bigquery". + - AIP_DATA_FORMAT = "bigquery". - - AIP_TRAINING_DATA_URI = - "bigquery_destination.dataset\_\ **\ .training" + - AIP_TRAINING_DATA_URI = + "bigquery_destination.dataset\_\ **\ .training" - - AIP_VALIDATION_DATA_URI = - "bigquery_destination.dataset\_\ **\ .validation" + - AIP_VALIDATION_DATA_URI = + "bigquery_destination.dataset\_\ **\ .validation" - - AIP_TEST_DATA_URI = - "bigquery_destination.dataset\_\ **\ .test". + - AIP_TEST_DATA_URI = + "bigquery_destination.dataset\_\ **\ .test". This field is a member of `oneof`_ ``destination``. 
dataset_id (str): diff --git a/google/cloud/aiplatform_v1/types/tuning_job.py b/google/cloud/aiplatform_v1/types/tuning_job.py index 33f6545b04..615ad83d78 100644 --- a/google/cloud/aiplatform_v1/types/tuning_job.py +++ b/google/cloud/aiplatform_v1/types/tuning_job.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import content -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import job_state from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1/types/vertex_rag_data.py b/google/cloud/aiplatform_v1/types/vertex_rag_data.py index 334a5b5425..67c9ae3810 100644 --- a/google/cloud/aiplatform_v1/types/vertex_rag_data.py +++ b/google/cloud/aiplatform_v1/types/vertex_rag_data.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1.types import api_auth as gca_api_auth -from google.cloud.aiplatform_v1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1.types import io from google.protobuf import timestamp_pb2 # type: ignore @@ -737,8 +735,8 @@ class LayoutParser(proto.Message): ``additional_config.parse_as_scanned_pdf`` field must be false. Format: - - ``projects/{project_id}/locations/{location}/processors/{processor_id}`` - - ``projects/{project_id}/locations/{location}/processors/{processor_id}/processorVersions/{processor_version_id}`` + - ``projects/{project_id}/locations/{location}/processors/{processor_id}`` + - ``projects/{project_id}/locations/{location}/processors/{processor_id}/processorVersions/{processor_version_id}`` max_parsing_requests_per_min (int): The maximum number of requests the job is allowed to make to the Document AI processor per @@ -765,7 +763,7 @@ class LlmParser(proto.Message): model_name (str): The name of a LLM model used for parsing. Format: - - ``projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}`` + - ``projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}`` max_parsing_requests_per_min (int): The maximum number of requests the job is allowed to make to the LLM model per minute. @@ -838,8 +836,8 @@ class ImportRagFilesConfig(proto.Message): files as well as entire Google Cloud Storage directories. Sample formats: - - ``gs://bucket_name/my_directory/object_name/my_file.txt`` - - ``gs://bucket_name/my_directory`` + - ``gs://bucket_name/my_directory/object_name/my_file.txt`` + - ``gs://bucket_name/my_directory`` This field is a member of `oneof`_ ``import_source``. google_drive_source (google.cloud.aiplatform_v1.types.GoogleDriveSource): @@ -1031,10 +1029,10 @@ class Basic(proto.Message): r"""Basic tier is a cost-effective and low compute tier suitable for the following cases: - - Experimenting with RagManagedDb. - - Small data size. - - Latency insensitive workload. - - Only using RAG Engine with external vector DBs. + - Experimenting with RagManagedDb. + - Small data size. + - Latency insensitive workload. + - Only using RAG Engine with external vector DBs. NOTE: This is the default tier if not explicitly chosen. 
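The Cloud Storage formats listed above plug into an import config as follows; the bucket and directory names are placeholders:

```python
from google.cloud import aiplatform_v1

import_config = aiplatform_v1.ImportRagFilesConfig(
    gcs_source=aiplatform_v1.GcsSource(
        # Either a single file or an entire directory, per the sample formats.
        uris=["gs://bucket_name/my_directory"],
    )
)
```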
diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 1d3a2ceb0d..b47d3921a9 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -1207,6 +1207,7 @@ from .types.tuning_job import EvaluateDatasetRun from .types.tuning_job import EvaluationConfig from .types.tuning_job import PartnerModelTuningSpec +from .types.tuning_job import PreTunedModel from .types.tuning_job import SupervisedHyperParameters from .types.tuning_job import SupervisedTuningDatasetDistribution from .types.tuning_job import SupervisedTuningDataStats @@ -2128,6 +2129,7 @@ "PointwiseMetricSpec", "Port", "PostStartupScriptConfig", + "PreTunedModel", "PrebuiltVoiceConfig", "PredefinedSplit", "PredictLongRunningMetadata", diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index a0c1fbf5ab..c69e57edea 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -55,9 +55,7 @@ from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset from google.cloud.aiplatform_v1beta1.types import dataset_service from google.cloud.aiplatform_v1beta1.types import dataset_version -from google.cloud.aiplatform_v1beta1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1beta1.types import dataset_version as gca_dataset_version from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import saved_query @@ -647,9 +645,9 @@ async def sample_update_dataset(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` - - ``description`` - - ``labels`` + - ``display_name`` + - ``description`` + - ``labels`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1466,7 +1464,7 @@ async def sample_update_dataset_version(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
Updatable fields: - - ``display_name`` + - ``display_name`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py index d38da9faa6..0b3a82f7e3 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -71,9 +71,7 @@ from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset from google.cloud.aiplatform_v1beta1.types import dataset_service from google.cloud.aiplatform_v1beta1.types import dataset_version -from google.cloud.aiplatform_v1beta1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1beta1.types import dataset_version as gca_dataset_version from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import saved_query @@ -827,11 +825,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = DatasetServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + DatasetServiceClient._read_environment_variables() + ) self._client_cert_source = DatasetServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -1270,9 +1266,9 @@ def sample_update_dataset(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` - - ``description`` - - ``labels`` + - ``display_name`` + - ``description`` + - ``labels`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2071,7 +2067,7 @@ def sample_update_dataset_version(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
Updatable fields: - - ``display_name`` + - ``display_name`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py index 8890fe7ce7..b0f2c39a7d 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py @@ -33,9 +33,7 @@ from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset from google.cloud.aiplatform_v1beta1.types import dataset_service from google.cloud.aiplatform_v1beta1.types import dataset_version -from google.cloud.aiplatform_v1beta1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1beta1.types import dataset_version as gca_dataset_version from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py index 75677d0c83..22aec8c968 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py @@ -36,9 +36,7 @@ from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset from google.cloud.aiplatform_v1beta1.types import dataset_service from google.cloud.aiplatform_v1beta1.types import dataset_version -from google.cloud.aiplatform_v1beta1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1beta1.types import dataset_version as gca_dataset_version from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py index de272406e5..c3678e1c73 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py @@ -39,9 +39,7 @@ from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset from google.cloud.aiplatform_v1beta1.types import dataset_service from google.cloud.aiplatform_v1beta1.types import dataset_version -from google.cloud.aiplatform_v1beta1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1beta1.types import dataset_version as gca_dataset_version from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py index b8d42d3001..db2aafbc7d 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py @@ -42,9 +42,7 @@ from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset from google.cloud.aiplatform_v1beta1.types import 
dataset_service from google.cloud.aiplatform_v1beta1.types import dataset_version -from google.cloud.aiplatform_v1beta1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1beta1.types import dataset_version as gca_dataset_version from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest_asyncio.py index 44cbd37757..fe09d45c00 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest_asyncio.py @@ -55,9 +55,7 @@ from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset from google.cloud.aiplatform_v1beta1.types import dataset_service from google.cloud.aiplatform_v1beta1.types import dataset_version -from google.cloud.aiplatform_v1beta1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1beta1.types import dataset_version as gca_dataset_version from google.longrunning import operations_pb2 # type: ignore @@ -4668,11 +4666,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_restore_dataset_version(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_restore_dataset_version_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_restore_dataset_version_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest_base.py index ca1aafa227..023b496b99 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest_base.py @@ -32,9 +32,7 @@ from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset from google.cloud.aiplatform_v1beta1.types import dataset_service from google.cloud.aiplatform_v1beta1.types import dataset_version -from google.cloud.aiplatform_v1beta1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1beta1.types import dataset_version as gca_dataset_version from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py index 40a059ff6c..aba2e8fc09 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py @@ -54,9 +54,7 @@ from google.cloud.aiplatform_v1beta1.types import ( deployment_resource_pool as gca_deployment_resource_pool, ) -from google.cloud.aiplatform_v1beta1.types import ( - deployment_resource_pool_service, -) +from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool_service from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import endpoint from google.cloud.aiplatform_v1beta1.types import machine_resources diff --git 
a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/client.py index 374967bb7b..ac5c233178 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/client.py @@ -70,9 +70,7 @@ from google.cloud.aiplatform_v1beta1.types import ( deployment_resource_pool as gca_deployment_resource_pool, ) -from google.cloud.aiplatform_v1beta1.types import ( - deployment_resource_pool_service, -) +from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool_service from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import endpoint from google.cloud.aiplatform_v1beta1.types import machine_resources @@ -714,11 +712,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = DeploymentResourcePoolServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + DeploymentResourcePoolServiceClient._read_environment_variables() + ) self._client_cert_source = ( DeploymentResourcePoolServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/pagers.py index c65c306d13..12f860b499 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/pagers.py @@ -38,9 +38,7 @@ OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool -from google.cloud.aiplatform_v1beta1.types import ( - deployment_resource_pool_service, -) +from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool_service from google.cloud.aiplatform_v1beta1.types import endpoint diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/base.py index 3c3536fb1b..d1979a7988 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/base.py @@ -29,9 +29,7 @@ import google.protobuf from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool -from google.cloud.aiplatform_v1beta1.types import ( - deployment_resource_pool_service, -) +from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc.py index 2d7de93c0c..fb9680b3c3 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc.py +++ 
b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc.py @@ -32,9 +32,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool -from google.cloud.aiplatform_v1beta1.types import ( - deployment_resource_pool_service, -) +from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc_asyncio.py index 1ff5718ba9..2787a3332b 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/grpc_asyncio.py @@ -35,9 +35,7 @@ from grpc.experimental import aio # type: ignore from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool -from google.cloud.aiplatform_v1beta1.types import ( - deployment_resource_pool_service, -) +from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py index a72be893e6..af2e8bc682 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py @@ -38,9 +38,7 @@ from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool -from google.cloud.aiplatform_v1beta1.types import ( - deployment_resource_pool_service, -) +from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool_service from google.longrunning import operations_pb2 # type: ignore @@ -3078,11 +3076,10 @@ def __call__( resp = self._interceptor.post_create_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_create_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_create_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3234,11 +3231,10 @@ def __call__( resp = self._interceptor.post_delete_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_delete_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_delete_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3557,11 +3553,10 @@ def __call__( resp = self._interceptor.post_list_deployment_resource_pools(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_list_deployment_resource_pools_with_metadata( - resp, response_metadata + resp, _ = 
( + self._interceptor.post_list_deployment_resource_pools_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3878,11 +3873,10 @@ def __call__( resp = self._interceptor.post_update_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_update_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_update_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest_asyncio.py index dd802c9e41..5075edb78a 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest_asyncio.py @@ -51,9 +51,7 @@ from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool -from google.cloud.aiplatform_v1beta1.types import ( - deployment_resource_pool_service, -) +from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool_service from google.longrunning import operations_pb2 # type: ignore @@ -935,11 +933,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseCreateDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseCreateDeploymentResourcePool._get_transcoded_request( http_options, request @@ -1010,11 +1007,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1107,11 +1103,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseDeleteDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_delete_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseDeleteDeploymentResourcePool._get_transcoded_request( http_options, request @@ -1177,11 +1172,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_deployment_resource_pool_with_metadata( + resp, 
response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1275,11 +1269,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseGetDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_get_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_get_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseGetDeploymentResourcePool._get_transcoded_request( http_options, request @@ -1345,11 +1338,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_get_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1445,11 +1437,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseListDeploymentResourcePools._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_deployment_resource_pools( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_deployment_resource_pools( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseListDeploymentResourcePools._get_transcoded_request( http_options, request @@ -1521,11 +1512,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_deployment_resource_pools(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_deployment_resource_pools_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_deployment_resource_pools_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1784,11 +1774,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseUpdateDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseUpdateDeploymentResourcePool._get_transcoded_request( http_options, request @@ -1859,11 +1848,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_deployment_resource_pool(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_deployment_resource_pool_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_deployment_resource_pool_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest_base.py 
b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest_base.py index c355cb531f..edaa8bc9f9 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest_base.py @@ -28,9 +28,7 @@ from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool -from google.cloud.aiplatform_v1beta1.types import ( - deployment_resource_pool_service, -) +from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool_service from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index fc22524179..f1092e60be 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -1516,19 +1516,19 @@ async def sample_mutate_deployed_model(): Required. The DeployedModel to be mutated within the Endpoint. Only the following fields can be mutated: - - ``min_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] - - ``max_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] - - ``required_replica_count`` in - [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] - - [autoscaling_metric_specs][google.cloud.aiplatform.v1beta1.DedicatedResources.autoscaling_metric_specs] - - ``disable_container_logging`` (v1 only) - - ``enable_container_logging`` (v1beta1 only) + - ``min_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] + - ``max_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] + - ``required_replica_count`` in + [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] + - [autoscaling_metric_specs][google.cloud.aiplatform.v1beta1.DedicatedResources.autoscaling_metric_specs] + - ``disable_container_logging`` (v1 only) + - ``enable_container_logging`` (v1beta1 only) This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py index 4e707f7edc..3d69c581db 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -738,11 +738,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = EndpointServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + EndpointServiceClient._read_environment_variables() + ) self._client_cert_source = EndpointServiceClient._get_client_cert_source( self._client_options.client_cert_source, 
self._use_client_cert ) @@ -2044,19 +2042,19 @@ def sample_mutate_deployed_model(): Required. The DeployedModel to be mutated within the Endpoint. Only the following fields can be mutated: - - ``min_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] - - ``max_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] - - ``required_replica_count`` in - [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] - - [autoscaling_metric_specs][google.cloud.aiplatform.v1beta1.DedicatedResources.autoscaling_metric_specs] - - ``disable_container_logging`` (v1 only) - - ``enable_container_logging`` (v1beta1 only) + - ``min_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] + - ``max_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] + - ``required_replica_count`` in + [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] + - [autoscaling_metric_specs][google.cloud.aiplatform.v1beta1.DedicatedResources.autoscaling_metric_specs] + - ``disable_container_logging`` (v1 only) + - ``enable_container_logging`` (v1beta1 only) This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest_asyncio.py index ccc9a98464..1ef5bc5c42 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest_asyncio.py @@ -1710,11 +1710,10 @@ async def __call__( _BaseEndpointServiceRestTransport._BaseFetchPublisherModelConfig._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_fetch_publisher_model_config( - request, metadata + request, metadata = ( + await self._interceptor.pre_fetch_publisher_model_config( + request, metadata + ) ) transcoded_request = _BaseEndpointServiceRestTransport._BaseFetchPublisherModelConfig._get_transcoded_request( http_options, request @@ -1780,11 +1779,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_fetch_publisher_model_config(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_fetch_publisher_model_config_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_fetch_publisher_model_config_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -2432,11 +2430,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_set_publisher_model_config(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_set_publisher_model_config_with_metadata( - resp, response_metadata + resp, _ = ( + await 
self._interceptor.post_set_publisher_model_config_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -2860,11 +2857,10 @@ async def __call__( _BaseEndpointServiceRestTransport._BaseUpdateEndpointLongRunning._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_endpoint_long_running( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_endpoint_long_running( + request, metadata + ) ) transcoded_request = _BaseEndpointServiceRestTransport._BaseUpdateEndpointLongRunning._get_transcoded_request( http_options, request @@ -2935,11 +2931,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_endpoint_long_running(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_endpoint_long_running_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_endpoint_long_running_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py index 2943486a81..02cca35aca 100644 --- a/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py @@ -602,11 +602,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = EvaluationServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + EvaluationServiceClient._read_environment_variables() + ) self._client_cert_source = EvaluationServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/example_store_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/example_store_service/async_client.py index 9d6a022e73..f9f2bcbcb4 100644 --- a/google/cloud/aiplatform_v1beta1/services/example_store_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/example_store_service/async_client.py @@ -47,13 +47,9 @@ from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.example_store_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.example_store_service import pagers from google.cloud.aiplatform_v1beta1.types import example_store -from google.cloud.aiplatform_v1beta1.types import ( - example_store as gca_example_store, -) +from google.cloud.aiplatform_v1beta1.types import example_store as gca_example_store from google.cloud.aiplatform_v1beta1.types import example_store_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/example_store_service/client.py b/google/cloud/aiplatform_v1beta1/services/example_store_service/client.py index ef1c607bbc..08427802f7 100644 --- a/google/cloud/aiplatform_v1beta1/services/example_store_service/client.py +++ 
diff --git a/google/cloud/aiplatform_v1beta1/services/example_store_service/client.py b/google/cloud/aiplatform_v1beta1/services/example_store_service/client.py
index ef1c607bbc..08427802f7 100644
--- a/google/cloud/aiplatform_v1beta1/services/example_store_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/example_store_service/client.py
@@ -63,13 +63,9 @@
 from google.api_core import operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1beta1.services.example_store_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.example_store_service import pagers
 from google.cloud.aiplatform_v1beta1.types import example_store
-from google.cloud.aiplatform_v1beta1.types import (
-    example_store as gca_example_store,
-)
+from google.cloud.aiplatform_v1beta1.types import example_store as gca_example_store
 from google.cloud.aiplatform_v1beta1.types import example_store_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
@@ -634,11 +630,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = ExampleStoreServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            ExampleStoreServiceClient._read_environment_variables()
+        )
         self._client_cert_source = ExampleStoreServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py
index fe18b61ccc..b405f2a3ce 100644
--- a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py
@@ -649,11 +649,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = ExtensionExecutionServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            ExtensionExecutionServiceClient._read_environment_variables()
+        )
         self._client_cert_source = (
             ExtensionExecutionServiceClient._get_client_cert_source(
                 self._client_options.client_cert_source, self._use_client_cert
diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py
index b9f49858ab..4eb1d0f772 100644
--- a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py
@@ -47,9 +47,7 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1beta1.services.extension_registry_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.extension_registry_service import pagers
 from google.cloud.aiplatform_v1beta1.types import extension
 from google.cloud.aiplatform_v1beta1.types import extension as gca_extension
 from google.cloud.aiplatform_v1beta1.types import extension_registry_service
diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py
index a3b023be0d..304d64c9f4 100644
--- a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py
@@ -63,9 +63,7 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1beta1.services.extension_registry_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.extension_registry_service import pagers
 from google.cloud.aiplatform_v1beta1.types import extension
 from google.cloud.aiplatform_v1beta1.types import extension as gca_extension
 from google.cloud.aiplatform_v1beta1.types import extension_registry_service
@@ -682,11 +680,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = ExtensionRegistryServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            ExtensionRegistryServiceClient._read_environment_variables()
+        )
         self._client_cert_source = (
             ExtensionRegistryServiceClient._get_client_cert_source(
                 self._client_options.client_cert_source, self._use_client_cert
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py
index 77666284f5..453beeacbd 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py
@@ -55,13 +55,9 @@
 from google.cloud.aiplatform_v1beta1.types import (
     feature_online_store as gca_feature_online_store,
 )
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1beta1.types import feature_view
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_view as gca_feature_view,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_view as gca_feature_view
 from google.cloud.aiplatform_v1beta1.types import feature_view_sync
 from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
 from google.cloud.location import locations_pb2  # type: ignore
@@ -856,11 +852,11 @@ async def sample_update_feature_online_store():
 
                 Updatable fields:
 
-                -  ``labels``
-                -  ``description``
-                -  ``bigtable``
-                -  ``bigtable.auto_scaling``
-                -  ``bigtable.enable_multi_region_replica``
+                - ``labels``
+                - ``description``
+                - ``bigtable``
+                - ``bigtable.auto_scaling``
+                - ``bigtable.enable_multi_region_replica``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -1592,15 +1588,15 @@ async def sample_update_feature_view():
 
                 Updatable fields:
 
-                -  ``labels``
-                -  ``service_agent_type``
-                -  ``big_query_source``
-                -  ``big_query_source.uri``
-                -  ``big_query_source.entity_id_columns``
-                -  ``feature_registry_source``
-                -  ``feature_registry_source.feature_groups``
-                -  ``sync_config``
-                -  ``sync_config.cron``
+                - ``labels``
+                - ``service_agent_type``
+                - ``big_query_source``
+                - ``big_query_source.uri``
+                - ``big_query_source.entity_id_columns``
+                - ``feature_registry_source``
+                - ``feature_registry_source.feature_groups``
+                - ``sync_config``
+                - ``sync_config.cron``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/client.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/client.py
index 7132a33450..321bfc1729 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/client.py
@@ -71,13 +71,9 @@
 from google.cloud.aiplatform_v1beta1.types import (
     feature_online_store as gca_feature_online_store,
 )
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1beta1.types import feature_view
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_view as gca_feature_view,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_view as gca_feature_view
 from google.cloud.aiplatform_v1beta1.types import feature_view_sync
 from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
 from google.cloud.location import locations_pb2  # type: ignore
@@ -708,11 +704,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = FeatureOnlineStoreAdminServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            FeatureOnlineStoreAdminServiceClient._read_environment_variables()
+        )
         self._client_cert_source = (
             FeatureOnlineStoreAdminServiceClient._get_client_cert_source(
                 self._client_options.client_cert_source, self._use_client_cert
@@ -1357,11 +1351,11 @@ def sample_update_feature_online_store():
 
                 Updatable fields:
 
-                -  ``labels``
-                -  ``description``
-                -  ``bigtable``
-                -  ``bigtable.auto_scaling``
-                -  ``bigtable.enable_multi_region_replica``
+                - ``labels``
+                - ``description``
+                - ``bigtable``
+                - ``bigtable.auto_scaling``
+                - ``bigtable.enable_multi_region_replica``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -2082,15 +2076,15 @@ def sample_update_feature_view():
 
                 Updatable fields:
 
-                -  ``labels``
-                -  ``service_agent_type``
-                -  ``big_query_source``
-                -  ``big_query_source.uri``
-                -  ``big_query_source.entity_id_columns``
-                -  ``feature_registry_source``
-                -  ``feature_registry_source.feature_groups``
-                -  ``sync_config``
-                -  ``sync_config.cron``
+                - ``labels``
+                - ``service_agent_type``
+                - ``big_query_source``
+                - ``big_query_source.uri``
+                - ``big_query_source.entity_id_columns``
+                - ``feature_registry_source``
+                - ``feature_registry_source.feature_groups``
+                - ``sync_config``
+                - ``sync_config.cron``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
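The `update_mask` docstrings above enumerate the fields the service will honor on update. A minimal sketch of exercising them with a real `FieldMask`; the project, location, and store names are placeholders, and the call is a long-running operation as the generated client documents:

```python
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

# Constructing the client requires application default credentials;
# this is a sketch, not a runnable end-to-end sample.
client = aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient()

store = aiplatform_v1beta1.FeatureOnlineStore(
    name="projects/my-project/locations/us-central1/featureOnlineStores/my-store",
    labels={"env": "prod"},
)

# Only the paths named in the mask are written; all other fields on
# `store` are ignored, per the "Updatable fields" lists above.
operation = client.update_feature_online_store(
    feature_online_store=store,
    update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
)
response = operation.result()  # blocks until the LRO completes
```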
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/pagers.py
index 4c50fc751a..c4d19bff30 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/pagers.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/pagers.py
@@ -38,9 +38,7 @@
     OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None]  # type: ignore
 
 from google.cloud.aiplatform_v1beta1.types import feature_online_store
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1beta1.types import feature_view
 from google.cloud.aiplatform_v1beta1.types import feature_view_sync
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/base.py
index 818783586f..059ce2fdde 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/base.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/base.py
@@ -29,9 +29,7 @@
 import google.protobuf
 
 from google.cloud.aiplatform_v1beta1.types import feature_online_store
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1beta1.types import feature_view
 from google.cloud.aiplatform_v1beta1.types import feature_view_sync
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc.py
index 52b3eab532..d3e035eac6 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc.py
@@ -32,9 +32,7 @@
 import proto  # type: ignore
 
 from google.cloud.aiplatform_v1beta1.types import feature_online_store
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1beta1.types import feature_view
 from google.cloud.aiplatform_v1beta1.types import feature_view_sync
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc_asyncio.py
index 83a402eb6f..6d1df4ce96 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/grpc_asyncio.py
@@ -35,9 +35,7 @@
 from grpc.experimental import aio  # type: ignore
 
 from google.cloud.aiplatform_v1beta1.types import feature_online_store
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1beta1.types import feature_view
 from google.cloud.aiplatform_v1beta1.types import feature_view_sync
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py
index 3fa1e17cf6..f3282b95b5 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py
@@ -38,9 +38,7 @@
 
 from google.cloud.aiplatform_v1beta1.types import feature_online_store
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1beta1.types import feature_view
 from google.cloud.aiplatform_v1beta1.types import feature_view_sync
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest_asyncio.py
index c982ff0dac..5f27a4aad7 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest_asyncio.py
@@ -51,9 +51,7 @@
 
 from google.cloud.aiplatform_v1beta1.types import feature_online_store
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1beta1.types import feature_view
 from google.cloud.aiplatform_v1beta1.types import feature_view_sync
 from google.longrunning import operations_pb2  # type: ignore
@@ -1453,11 +1451,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_feature_online_store(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_feature_online_store_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_feature_online_store_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1783,11 +1780,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_feature_online_store(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_feature_online_store_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_feature_online_store_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2108,11 +2104,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_feature_online_store(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_feature_online_store_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_feature_online_store_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2600,11 +2595,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_feature_online_stores(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_feature_online_stores_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_feature_online_stores_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2929,11 +2923,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_feature_view_syncs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_feature_view_syncs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_feature_view_syncs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3270,11 +3263,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_update_feature_online_store(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_update_feature_online_store_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_update_feature_online_store_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest_base.py
index 534842eddd..ad910acced 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest_base.py
@@ -28,9 +28,7 @@
 
 from google.cloud.aiplatform_v1beta1.types import feature_online_store
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1beta1.types import feature_view
 from google.cloud.aiplatform_v1beta1.types import feature_view_sync
 from google.longrunning import operations_pb2  # type: ignore
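The reflowed `post_*_with_metadata` calls above are interceptor hooks. A sketch of overriding one, assuming the generated `AsyncFeatureOnlineStoreAdminServiceRestInterceptor` base class lives in the module being patched here and that the optional async REST dependencies are installed; the subclass name is hypothetical:

```python
from google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.transports.rest_asyncio import (
    AsyncFeatureOnlineStoreAdminServiceRestInterceptor,
)


class HeaderLoggingInterceptor(AsyncFeatureOnlineStoreAdminServiceRestInterceptor):
    """Hypothetical interceptor that inspects response headers."""

    async def post_get_feature_online_store_with_metadata(self, response, metadata):
        # `metadata` is the [(header, str(value)), ...] list the transport
        # builds from response.headers, as visible in the hunks above.
        print(f"GetFeatureOnlineStore returned {len(metadata)} response header(s)")
        # Returning the pair unchanged preserves the default behavior.
        return response, metadata
```

An instance would be passed to the async REST transport via its `interceptor=` constructor argument.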
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py
index e55b3ab716..a8e895d00c 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py
@@ -48,9 +48,7 @@
 except AttributeError:  # pragma: NO COVER
     OptionalRetry = Union[retries.AsyncRetry, object, None]  # type: ignore
 
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/client.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/client.py
index cc987796de..6c94b254b2 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/client.py
@@ -63,9 +63,7 @@
 _LOGGER = std_logging.getLogger(__name__)
 
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
@@ -634,11 +632,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = FeatureOnlineStoreServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            FeatureOnlineStoreServiceClient._read_environment_variables()
+        )
         self._client_cert_source = (
             FeatureOnlineStoreServiceClient._get_client_cert_source(
                 self._client_options.client_cert_source, self._use_client_cert
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/base.py
index 66f6296664..2ebf6cf1da 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/base.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/base.py
@@ -27,9 +27,7 @@
 from google.oauth2 import service_account  # type: ignore
 import google.protobuf
 
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc.py
index 52dc8a5ced..088fc76bf4 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc.py
@@ -30,9 +30,7 @@
 import grpc  # type: ignore
 import proto  # type: ignore
 
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc_asyncio.py
index 4e9473af7c..b428e917b7 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/grpc_asyncio.py
@@ -33,9 +33,7 @@
 import proto  # type: ignore
 from grpc.experimental import aio  # type: ignore
 
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py
index d02d1c2b0a..0adc1e01bf 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py
@@ -36,9 +36,7 @@
 import warnings
 
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_service
 
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest_asyncio.py
index 8489323ed2..d7d6a11bd2 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest_asyncio.py
@@ -48,9 +48,7 @@
 from typing import Any, Dict, List, Callable, Tuple, Optional, Sequence, Union
 
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_service
 
 from google.longrunning import operations_pb2  # type: ignore
@@ -951,11 +949,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_search_nearest_entities(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_search_nearest_entities_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_search_nearest_entities_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest_base.py
index 644f5a3258..867036c6ed 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest_base.py
@@ -27,9 +27,7 @@
 from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
 
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_online_store_service,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_online_store_service
 
 from google.longrunning import operations_pb2  # type: ignore
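For context on the `_read_environment_variables()` unpacking that recurs in every client `__init__`: the three values come from environment variables read once at construction time. A sketch under the assumption that these are the documented `GOOGLE_API_USE_CLIENT_CERTIFICATE`, `GOOGLE_API_USE_MTLS_ENDPOINT`, and `GOOGLE_CLOUD_UNIVERSE_DOMAIN` settings; anonymous credentials keep the snippet self-contained:

```python
import os

# These map to self._use_client_cert / self._use_mtls_endpoint /
# self._universe_domain_env in the hunks above (an assumption based on
# the documented client options of this library).
os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "false"
os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "auto"  # "always" | "never" | "auto"
os.environ["GOOGLE_CLOUD_UNIVERSE_DOMAIN"] = "googleapis.com"

from google.auth.credentials import AnonymousCredentials
from google.cloud import aiplatform_v1beta1

# Construction triggers the environment read; no RPC is made here.
client = aiplatform_v1beta1.FeatureOnlineStoreServiceClient(
    credentials=AnonymousCredentials()
)
```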
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py
index 2aa4614416..b180a3de96 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py
@@ -47,19 +47,13 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1beta1.services.feature_registry_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.feature_registry_service import pagers
 from google.cloud.aiplatform_v1beta1.types import feature
 from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
 from google.cloud.aiplatform_v1beta1.types import feature_group
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_group as gca_feature_group,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_group as gca_feature_group
 from google.cloud.aiplatform_v1beta1.types import feature_monitor
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_monitor as gca_feature_monitor,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_monitor as gca_feature_monitor
 from google.cloud.aiplatform_v1beta1.types import feature_monitor_job
 from google.cloud.aiplatform_v1beta1.types import (
     feature_monitor_job as gca_feature_monitor_job,
@@ -815,10 +809,10 @@ async def sample_update_feature_group():
 
                 Updatable fields:
 
-                -  ``labels``
-                -  ``description``
-                -  ``big_query``
-                -  ``big_query.entity_id_columns``
+                - ``labels``
+                - ``description``
+                - ``big_query``
+                - ``big_query.entity_id_columns``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -1683,12 +1677,12 @@ async def sample_update_feature():
 
                 Updatable fields:
 
-                -  ``description``
-                -  ``labels``
-                -  ``disable_monitoring`` (Not supported for
-                   FeatureRegistryService Feature)
-                -  ``point_of_contact`` (Not supported for
-                   FeaturestoreService FeatureStore)
+                - ``description``
+                - ``labels``
+                - ``disable_monitoring`` (Not supported for
+                  FeatureRegistryService Feature)
+                - ``point_of_contact`` (Not supported for
+                  FeaturestoreService FeatureStore)
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -2375,7 +2369,7 @@ async def sample_update_feature_monitor():
 
                 Updatable fields:
 
-                -  ``labels``
+                - ``labels``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/client.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/client.py
index 0d2c994a34..51f794ef1b 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/client.py
@@ -63,19 +63,13 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1beta1.services.feature_registry_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.feature_registry_service import pagers
 from google.cloud.aiplatform_v1beta1.types import feature
 from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
 from google.cloud.aiplatform_v1beta1.types import feature_group
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_group as gca_feature_group,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_group as gca_feature_group
 from google.cloud.aiplatform_v1beta1.types import feature_monitor
-from google.cloud.aiplatform_v1beta1.types import (
-    feature_monitor as gca_feature_monitor,
-)
+from google.cloud.aiplatform_v1beta1.types import feature_monitor as gca_feature_monitor
 from google.cloud.aiplatform_v1beta1.types import feature_monitor_job
 from google.cloud.aiplatform_v1beta1.types import (
     feature_monitor_job as gca_feature_monitor_job,
@@ -729,11 +723,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = FeatureRegistryServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            FeatureRegistryServiceClient._read_environment_variables()
+        )
         self._client_cert_source = FeatureRegistryServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
@@ -1330,10 +1322,10 @@ def sample_update_feature_group():
 
                 Updatable fields:
 
-                -  ``labels``
-                -  ``description``
-                -  ``big_query``
-                -  ``big_query.entity_id_columns``
+                - ``labels``
+                - ``description``
+                - ``big_query``
+                - ``big_query.entity_id_columns``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -2180,12 +2172,12 @@ def sample_update_feature():
 
                 Updatable fields:
 
-                -  ``description``
-                -  ``labels``
-                -  ``disable_monitoring`` (Not supported for
-                   FeatureRegistryService Feature)
-                -  ``point_of_contact`` (Not supported for
-                   FeaturestoreService FeatureStore)
+                - ``description``
+                - ``labels``
+                - ``disable_monitoring`` (Not supported for
+                  FeatureRegistryService Feature)
+                - ``point_of_contact`` (Not supported for
+                  FeaturestoreService FeatureStore)
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -2857,7 +2849,7 @@ def sample_update_feature_monitor():
 
                 Updatable fields:
 
-                -  ``labels``
+                - ``labels``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest_asyncio.py
index e7ba3dc504..3b84591f08 100644
--- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest_asyncio.py
@@ -2478,11 +2478,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_feature_monitor_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_feature_monitor_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_feature_monitor_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3584,11 +3583,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_feature_monitor_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_feature_monitor_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_feature_monitor_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3909,11 +3907,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_feature_monitor_jobs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_feature_monitor_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_feature_monitor_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py
index f3d35d3919..58690c984d 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py
@@ -638,11 +638,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = FeaturestoreOnlineServingServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            FeaturestoreOnlineServingServiceClient._read_environment_variables()
+        )
         self._client_cert_source = (
             FeaturestoreOnlineServingServiceClient._get_client_cert_source(
                 self._client_options.client_cert_source, self._use_client_cert
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py
index e914ab2b3b..e9399414b1 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py
@@ -898,12 +898,27 @@ def __call__(
             resp = self._interceptor.post_streaming_read_feature_values(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_streaming_read_feature_values_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_streaming_read_feature_values_with_metadata(
+                    resp, response_metadata
+                )
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": response.status_code,
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.streaming_read_feature_values",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService",
+                        "rpcName": "StreamingReadFeatureValues",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
             return resp
 
     class _WriteFeatureValues(
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest_asyncio.py
index bc7eae08de..59f939e5a9 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest_asyncio.py
@@ -910,11 +910,10 @@ async def __call__(
                 _BaseFeaturestoreOnlineServingServiceRestTransport._BaseStreamingReadFeatureValues._get_http_options()
             )
 
-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_streaming_read_feature_values(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_streaming_read_feature_values(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseFeaturestoreOnlineServingServiceRestTransport._BaseStreamingReadFeatureValues._get_transcoded_request(
                 http_options, request
@@ -984,12 +983,28 @@ async def __call__(
             )
             resp = await self._interceptor.post_streaming_read_feature_values(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_streaming_read_feature_values_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_streaming_read_feature_values_with_metadata(
+                    resp, response_metadata
+                )
             )
+            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+                logging.DEBUG
+            ):  # pragma: NO COVER
+                http_response = {
+                    "headers": dict(response.headers),
+                    "status": "OK",  # need to obtain this properly
+                }
+                _LOGGER.debug(
+                    "Received response for google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.streaming_read_feature_values",
+                    extra={
+                        "serviceName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService",
+                        "rpcName": "StreamingReadFeatureValues",
+                        "metadata": http_response["headers"],
+                        "httpResponse": http_response,
+                    },
+                )
+            return resp
 
     class _WriteFeatureValues(
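The blocks added in the two streaming hunks above emit a DEBUG record per response when client logging is supported (note the async variant hard-codes `"status": "OK"` behind a TODO comment, where the sync variant reads `response.status_code`). A sketch of surfacing those records with standard-library logging, assuming the module-level loggers follow the `google.cloud.aiplatform_v1beta1...` naming visible in these files (`_LOGGER = std_logging.getLogger(__name__)`):

```python
import logging

handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)

# Child transport loggers inherit this effective level, which is what
# makes the `_LOGGER.isEnabledFor(logging.DEBUG)` guard above pass.
logger = logging.getLogger("google.cloud.aiplatform_v1beta1")
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
```

The `CLIENT_LOGGING_SUPPORTED` flag additionally depends on the installed google-api-core version, so on older installs the guard short-circuits and no record is emitted.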
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py
index 45ec540dca..130cc1879c 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py
@@ -47,22 +47,16 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1beta1.services.featurestore_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers
 from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import entity_type
-from google.cloud.aiplatform_v1beta1.types import (
-    entity_type as gca_entity_type,
-)
+from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
 from google.cloud.aiplatform_v1beta1.types import feature
 from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
 from google.cloud.aiplatform_v1beta1.types import feature_monitor
 from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats
 from google.cloud.aiplatform_v1beta1.types import featurestore
-from google.cloud.aiplatform_v1beta1.types import (
-    featurestore as gca_featurestore,
-)
+from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore
 from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring
 from google.cloud.aiplatform_v1beta1.types import featurestore_service
 from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
@@ -799,10 +793,10 @@ async def sample_update_featurestore():
 
                 Updatable fields:
 
-                -  ``labels``
-                -  ``online_serving_config.fixed_node_count``
-                -  ``online_serving_config.scaling``
-                -  ``online_storage_ttl_days``
+                - ``labels``
+                - ``online_serving_config.fixed_node_count``
+                - ``online_serving_config.scaling``
+                - ``online_storage_ttl_days``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -1502,16 +1496,16 @@ async def sample_update_entity_type():
 
                 Updatable fields:
 
-                -  ``description``
-                -  ``labels``
-                -  ``monitoring_config.snapshot_analysis.disabled``
-                -  ``monitoring_config.snapshot_analysis.monitoring_interval_days``
-                -  ``monitoring_config.snapshot_analysis.staleness_days``
-                -  ``monitoring_config.import_features_analysis.state``
-                -  ``monitoring_config.import_features_analysis.anomaly_detection_baseline``
-                -  ``monitoring_config.numerical_threshold_config.value``
-                -  ``monitoring_config.categorical_threshold_config.value``
-                -  ``offline_storage_ttl_days``
+                - ``description``
+                - ``labels``
+                - ``monitoring_config.snapshot_analysis.disabled``
+                - ``monitoring_config.snapshot_analysis.monitoring_interval_days``
+                - ``monitoring_config.snapshot_analysis.staleness_days``
+                - ``monitoring_config.import_features_analysis.state``
+                - ``monitoring_config.import_features_analysis.anomaly_detection_baseline``
+                - ``monitoring_config.numerical_threshold_config.value``
+                - ``monitoring_config.categorical_threshold_config.value``
+                - ``offline_storage_ttl_days``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -2367,12 +2361,12 @@ async def sample_update_feature():
 
                 Updatable fields:
 
-                -  ``description``
-                -  ``labels``
-                -  ``disable_monitoring`` (Not supported for
-                   FeatureRegistryService Feature)
-                -  ``point_of_contact`` (Not supported for
-                   FeaturestoreService FeatureStore)
+                - ``description``
+                - ``labels``
+                - ``disable_monitoring`` (Not supported for
+                  FeatureRegistryService Feature)
+                - ``point_of_contact`` (Not supported for
+                  FeaturestoreService FeatureStore)
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -3249,15 +3243,15 @@ async def sample_search_features():
                   to a sequence of words (i.e. tokens) for comparison.
                   This is done by:
 
-                  -  Removing leading/trailing whitespace and tokenizing
-                     the search value. Characters that are not one of
-                     alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or
-                     asterisk ``*`` are treated as delimiters for tokens.
-                     ``*`` is treated as a wildcard that matches
-                     characters within a token.
-                  -  Ignoring case.
-                  -  Prepending an asterisk to the first and appending an
-                     asterisk to the last token in QUERY.
+                  - Removing leading/trailing whitespace and tokenizing
+                    the search value. Characters that are not one of
+                    alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or
+                    asterisk ``*`` are treated as delimiters for tokens.
+                    ``*`` is treated as a wildcard that matches characters
+                    within a token.
+                  - Ignoring case.
+                  - Prepending an asterisk to the first and appending an
+                    asterisk to the last token in QUERY.
 
                   A QUERY must be either a singular token or a phrase. A
                   phrase is one or multiple words enclosed in double
@@ -3267,47 +3261,47 @@ async def sample_search_features():
 
                   Supported FIELDs for field-restricted queries:
 
-                  -  ``feature_id``
-                  -  ``description``
-                  -  ``entity_type_id``
+                  - ``feature_id``
+                  - ``description``
+                  - ``entity_type_id``
 
                   Examples:
 
-                  -  ``feature_id: foo`` --> Matches a Feature with ID
-                     containing the substring ``foo`` (eg. ``foo``,
-                     ``foofeature``, ``barfoo``).
-                  -  ``feature_id: foo*feature`` --> Matches a Feature
-                     with ID containing the substring ``foo*feature`` (eg.
-                     ``foobarfeature``).
-                  -  ``feature_id: foo AND description: bar`` --> Matches
-                     a Feature with ID containing the substring ``foo``
-                     and description containing the substring ``bar``.
+                  - ``feature_id: foo`` --> Matches a Feature with ID
+                    containing the substring ``foo`` (eg. ``foo``,
+                    ``foofeature``, ``barfoo``).
+                  - ``feature_id: foo*feature`` --> Matches a Feature with
+                    ID containing the substring ``foo*feature`` (eg.
+                    ``foobarfeature``).
+                  - ``feature_id: foo AND description: bar`` --> Matches a
+                    Feature with ID containing the substring ``foo`` and
+                    description containing the substring ``bar``.
 
                   Besides field queries, the following exact-match filters
                   are supported. The exact-match filters do not support
                   wildcards. Unlike field-restricted queries, exact-match
                   filters are case-sensitive.
 
-                  -  ``feature_id``: Supports = comparisons.
-                  -  ``description``: Supports = comparisons. Multi-token
-                     filters should be enclosed in quotes.
-                  -  ``entity_type_id``: Supports = comparisons.
-                  -  ``value_type``: Supports = and != comparisons.
-                  -  ``labels``: Supports key-value equality as well as
-                     key presence.
-                  -  ``featurestore_id``: Supports = comparisons.
+                  - ``feature_id``: Supports = comparisons.
+                  - ``description``: Supports = comparisons. Multi-token
+                    filters should be enclosed in quotes.
+                  - ``entity_type_id``: Supports = comparisons.
+                  - ``value_type``: Supports = and != comparisons.
+                  - ``labels``: Supports key-value equality as well as key
+                    presence.
+                  - ``featurestore_id``: Supports = comparisons.
 
                   Examples:
 
-                  -  ``description = "foo bar"`` --> Any Feature with
-                     description exactly equal to ``foo bar``
-                  -  ``value_type = DOUBLE`` --> Features whose type is
-                     DOUBLE.
-                  -  ``labels.active = yes AND labels.env = prod`` -->
-                     Features having both (active: yes) and (env: prod)
-                     labels.
-                  -  ``labels.env: *`` --> Any Feature which has a label
-                     with ``env`` as the key.
+                  - ``description = "foo bar"`` --> Any Feature with
+                    description exactly equal to ``foo bar``
+                  - ``value_type = DOUBLE`` --> Features whose type is
+                    DOUBLE.
+                  - ``labels.active = yes AND labels.env = prod`` -->
+                    Features having both (active: yes) and (env: prod)
+                    labels.
+                  - ``labels.env: *`` --> Any Feature which has a label
+                    with ``env`` as the key.
 
                 This corresponds to the ``query`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py
index ad8dde514d..bbecbead33 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py
@@ -63,22 +63,16 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1beta1.services.featurestore_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers
 from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import entity_type
-from google.cloud.aiplatform_v1beta1.types import (
-    entity_type as gca_entity_type,
-)
+from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
 from google.cloud.aiplatform_v1beta1.types import feature
 from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
 from google.cloud.aiplatform_v1beta1.types import feature_monitor
 from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats
 from google.cloud.aiplatform_v1beta1.types import featurestore
-from google.cloud.aiplatform_v1beta1.types import (
-    featurestore as gca_featurestore,
-)
+from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore
 from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring
 from google.cloud.aiplatform_v1beta1.types import featurestore_service
 from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
@@ -697,11 +691,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = FeaturestoreServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            FeaturestoreServiceClient._read_environment_variables()
+        )
         self._client_cert_source = FeaturestoreServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
@@ -1296,10 +1288,10 @@ def sample_update_featurestore():
 
                 Updatable fields:
 
-                -  ``labels``
-                -  ``online_serving_config.fixed_node_count``
-                -  ``online_serving_config.scaling``
-                -  ``online_storage_ttl_days``
+                - ``labels``
+                - ``online_serving_config.fixed_node_count``
+                - ``online_serving_config.scaling``
+                - ``online_storage_ttl_days``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -1984,16 +1976,16 @@ def sample_update_entity_type():
 
                 Updatable fields:
 
-                -  ``description``
-                -  ``labels``
-                -  ``monitoring_config.snapshot_analysis.disabled``
-                -  ``monitoring_config.snapshot_analysis.monitoring_interval_days``
-                -  ``monitoring_config.snapshot_analysis.staleness_days``
-                -  ``monitoring_config.import_features_analysis.state``
-                -  ``monitoring_config.import_features_analysis.anomaly_detection_baseline``
-                -  ``monitoring_config.numerical_threshold_config.value``
-                -  ``monitoring_config.categorical_threshold_config.value``
-                -  ``offline_storage_ttl_days``
+                - ``description``
+                - ``labels``
+                - ``monitoring_config.snapshot_analysis.disabled``
+                - ``monitoring_config.snapshot_analysis.monitoring_interval_days``
+                - ``monitoring_config.snapshot_analysis.staleness_days``
+                - ``monitoring_config.import_features_analysis.state``
+                - ``monitoring_config.import_features_analysis.anomaly_detection_baseline``
+                - ``monitoring_config.numerical_threshold_config.value``
+                - ``monitoring_config.categorical_threshold_config.value``
+                - ``offline_storage_ttl_days``
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -2831,12 +2823,12 @@ def sample_update_feature():
 
                 Updatable fields:
 
-                -  ``description``
-                -  ``labels``
-                -  ``disable_monitoring`` (Not supported for
-                   FeatureRegistryService Feature)
-                -  ``point_of_contact`` (Not supported for
-                   FeaturestoreService FeatureStore)
+                - ``description``
+                - ``labels``
+                - ``disable_monitoring`` (Not supported for
+                  FeatureRegistryService Feature)
+                - ``point_of_contact`` (Not supported for
+                  FeaturestoreService FeatureStore)
 
                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
@@ -3697,15 +3689,15 @@ def sample_search_features():
                   to a sequence of words (i.e. tokens) for comparison.
                   This is done by:
 
-                  -  Removing leading/trailing whitespace and tokenizing
-                     the search value. Characters that are not one of
-                     alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or
-                     asterisk ``*`` are treated as delimiters for tokens.
-                     ``*`` is treated as a wildcard that matches
-                     characters within a token.
-                  -  Ignoring case.
-                  -  Prepending an asterisk to the first and appending an
-                     asterisk to the last token in QUERY.
+                  - Removing leading/trailing whitespace and tokenizing
+                    the search value. Characters that are not one of
+                    alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or
+                    asterisk ``*`` are treated as delimiters for tokens.
+                    ``*`` is treated as a wildcard that matches characters
+                    within a token.
+                  - Ignoring case.
+                  - Prepending an asterisk to the first and appending an
+                    asterisk to the last token in QUERY.
 
                   A QUERY must be either a singular token or a phrase. A
                   phrase is one or multiple words enclosed in double
@@ -3715,47 +3707,47 @@ def sample_search_features():
 
                   Supported FIELDs for field-restricted queries:
 
-                  -  ``feature_id``
-                  -  ``description``
-                  -  ``entity_type_id``
+                  - ``feature_id``
+                  - ``description``
+                  - ``entity_type_id``
 
                   Examples:
 
-                  -  ``feature_id: foo`` --> Matches a Feature with ID
-                     containing the substring ``foo`` (eg. ``foo``,
-                     ``foofeature``, ``barfoo``).
-                  -  ``feature_id: foo*feature`` --> Matches a Feature
-                     with ID containing the substring ``foo*feature`` (eg.
-                     ``foobarfeature``).
-                  -  ``feature_id: foo AND description: bar`` --> Matches
-                     a Feature with ID containing the substring ``foo``
-                     and description containing the substring ``bar``.
+                  - ``feature_id: foo`` --> Matches a Feature with ID
+                    containing the substring ``foo`` (eg. ``foo``,
+                    ``foofeature``, ``barfoo``).
+                  - ``feature_id: foo*feature`` --> Matches a Feature with
+                    ID containing the substring ``foo*feature`` (eg.
+                    ``foobarfeature``).
+                  - ``feature_id: foo AND description: bar`` --> Matches a
+                    Feature with ID containing the substring ``foo`` and
+                    description containing the substring ``bar``.
 
                   Besides field queries, the following exact-match filters
                   are supported. The exact-match filters do not support
                   wildcards. Unlike field-restricted queries, exact-match
                   filters are case-sensitive.
 
-                  -  ``feature_id``: Supports = comparisons.
-                  -  ``description``: Supports = comparisons. Multi-token
-                     filters should be enclosed in quotes.
-                  -  ``entity_type_id``: Supports = comparisons.
-                  -  ``value_type``: Supports = and != comparisons.
-                  -  ``labels``: Supports key-value equality as well as
-                     key presence.
-                  -  ``featurestore_id``: Supports = comparisons.
+                  - ``feature_id``: Supports = comparisons.
+                  - ``description``: Supports = comparisons. Multi-token
+                    filters should be enclosed in quotes.
+                  - ``entity_type_id``: Supports = comparisons.
+                  - ``value_type``: Supports = and != comparisons.
+                  - ``labels``: Supports key-value equality as well as key
+                    presence.
+                  - ``featurestore_id``: Supports = comparisons.
 
                   Examples:
 
-                  -  ``description = "foo bar"`` --> Any Feature with
-                     description exactly equal to ``foo bar``
-                  -  ``value_type = DOUBLE`` --> Features whose type is
-                     DOUBLE.
-                  -  ``labels.active = yes AND labels.env = prod`` -->
-                     Features having both (active: yes) and (env: prod)
-                     labels.
-                  -  ``labels.env: *`` --> Any Feature which has a label
-                     with ``env`` as the key.
+                  - ``description = "foo bar"`` --> Any Feature with
+                    description exactly equal to ``foo bar``
+                  - ``value_type = DOUBLE`` --> Features whose type is
+                    DOUBLE.
+                  - ``labels.active = yes AND labels.env = prod`` -->
+                    Features having both (active: yes) and (env: prod)
+                    labels.
+                  - ``labels.env: *`` --> Any Feature which has a label
+                    with ``env`` as the key.
 
                 This corresponds to the ``query`` field
                 on the ``request`` instance; if ``request`` is provided, this
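The query grammar spelled out twice above translates directly into `search_features` calls. A short sketch with placeholder resource names; anonymous credentials let the snippet construct without ADC, though an actual RPC still needs valid credentials:

```python
from google.auth.credentials import AnonymousCredentials
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.FeaturestoreServiceClient(
    credentials=AnonymousCredentials()
)

# Tokenized, case-insensitive, field-restricted match: Feature IDs
# containing "foo" whose description contains "bar".
pager = client.search_features(
    location="projects/my-project/locations/us-central1",
    query="feature_id: foo AND description: bar",
)
for feature in pager:
    print(feature.name)
```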
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py
index 0d387c99a6..1fa9bd28fe 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py
@@ -29,9 +29,7 @@
 import google.protobuf
 
 from google.cloud.aiplatform_v1beta1.types import entity_type
-from google.cloud.aiplatform_v1beta1.types import (
-    entity_type as gca_entity_type,
-)
+from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
 from google.cloud.aiplatform_v1beta1.types import feature
 from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
 from google.cloud.aiplatform_v1beta1.types import featurestore
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py
index 4e7e120941..d7c72dcfcc 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py
@@ -32,9 +32,7 @@
 import proto  # type: ignore
 
 from google.cloud.aiplatform_v1beta1.types import entity_type
-from google.cloud.aiplatform_v1beta1.types import (
-    entity_type as gca_entity_type,
-)
+from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
 from google.cloud.aiplatform_v1beta1.types import feature
 from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
 from google.cloud.aiplatform_v1beta1.types import featurestore
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py
index 211cdb90b2..2d8b94deee 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py
@@ -35,9 +35,7 @@
 from grpc.experimental import aio  # type: ignore
 
 from google.cloud.aiplatform_v1beta1.types import entity_type
-from google.cloud.aiplatform_v1beta1.types import (
-    entity_type as gca_entity_type,
-)
+from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
 from google.cloud.aiplatform_v1beta1.types import feature
 from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
 from google.cloud.aiplatform_v1beta1.types import featurestore
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py
index ffffb893b0..e60d2677c1 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py
@@ -38,9 +38,7 @@
 
 from google.cloud.aiplatform_v1beta1.types import entity_type
-from google.cloud.aiplatform_v1beta1.types import (
-    entity_type as gca_entity_type,
-)
+from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
 from google.cloud.aiplatform_v1beta1.types import feature
 from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
 from google.cloud.aiplatform_v1beta1.types import featurestore
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest_asyncio.py
index a3a302e695..2f768737f4 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest_asyncio.py
@@ -51,9 +51,7 @@
 
 from google.cloud.aiplatform_v1beta1.types import entity_type
-from google.cloud.aiplatform_v1beta1.types import (
-    entity_type as gca_entity_type,
-)
+from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
 from google.cloud.aiplatform_v1beta1.types import feature
 from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
 from google.cloud.aiplatform_v1beta1.types import featurestore
@@ -2100,11 +2098,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_batch_read_feature_values(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_batch_read_feature_values_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_batch_read_feature_values_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest_base.py
index e491a2edc2..397b67ac0c 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest_base.py
@@ -28,9 +28,7 @@
 
 from google.cloud.aiplatform_v1beta1.types import entity_type
-from google.cloud.aiplatform_v1beta1.types import (
-    entity_type as gca_entity_type,
-)
+from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
 from google.cloud.aiplatform_v1beta1.types import feature
 from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
 from google.cloud.aiplatform_v1beta1.types import featurestore
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py
index d4bc5eff93..baa425e26d 100644
--- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py
@@ -45,13 +45,9 @@
 except AttributeError:  # pragma: NO COVER
     OptionalRetry = Union[retries.AsyncRetry, object, None]  # type: ignore
 
-from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import pagers
 from google.cloud.aiplatform_v1beta1.types import cached_content
-from google.cloud.aiplatform_v1beta1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1beta1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1beta1.types import content
 from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import gen_ai_cache_service
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/client.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/client.py
index 6a336a08fb..5f0ebc1193 100644
--- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/client.py
@@ -61,13 +61,9 @@
 _LOGGER = std_logging.getLogger(__name__)
 
-from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import pagers
 from google.cloud.aiplatform_v1beta1.types import cached_content
-from google.cloud.aiplatform_v1beta1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1beta1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1beta1.types import content
 from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import gen_ai_cache_service
@@ -657,11 +653,9 @@ def __init__(
 
         universe_domain_opt = getattr(self._client_options, "universe_domain", None)
 
-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = GenAiCacheServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            GenAiCacheServiceClient._read_environment_variables()
+        )
         self._client_cert_source = GenAiCacheServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/base.py
index 849558a20f..84bc631c54 100644
--- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/base.py
+++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/base.py
@@ -28,9 +28,7 @@
 import google.protobuf
 
 from google.cloud.aiplatform_v1beta1.types import cached_content
-from google.cloud.aiplatform_v1beta1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1beta1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1beta1.types import gen_ai_cache_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc.py
index a36968af6f..1076388fd6 100644
--- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc.py
@@ -31,9 +31,7 @@
 import proto  # type: ignore
 
 from google.cloud.aiplatform_v1beta1.types import cached_content
-from google.cloud.aiplatform_v1beta1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1beta1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1beta1.types import gen_ai_cache_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc_asyncio.py
index 7b3dc5bdbd..f98ff8f003 100644
--- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/grpc_asyncio.py
@@ -34,9 +34,7 @@
 from grpc.experimental import aio  # type: ignore
 
 from google.cloud.aiplatform_v1beta1.types import cached_content
-from google.cloud.aiplatform_v1beta1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1beta1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1beta1.types import gen_ai_cache_service
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py
index 265d3b84da..2232c28f23 100644
--- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py
@@ -37,9 +37,7 @@
 
 from google.cloud.aiplatform_v1beta1.types import cached_content
-from google.cloud.aiplatform_v1beta1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1beta1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1beta1.types import gen_ai_cache_service
 from google.protobuf import empty_pb2  # type: ignore
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest_asyncio.py
index c301493577..653d683e4a 100644
--- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest_asyncio.py
@@ -49,9 +49,7 @@
 
 from google.cloud.aiplatform_v1beta1.types import cached_content
-from google.cloud.aiplatform_v1beta1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1beta1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1beta1.types import gen_ai_cache_service
 from google.protobuf import empty_pb2  # type: ignore
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest_base.py
index 4d8cfafc8b..70ec0211e1 100644
--- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest_base.py
@@ -28,9 +28,7 @@
 
 from google.cloud.aiplatform_v1beta1.types import cached_content
-from google.cloud.aiplatform_v1beta1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1beta1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1beta1.types import gen_ai_cache_service
 from google.protobuf import empty_pb2  # type: ignore
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py
index cbd2ff6f85..8a0faf387d 100644
--- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py
@@ -47,16
+47,12 @@ from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.gen_ai_tuning_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.gen_ai_tuning_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import genai_tuning_service from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import tuning_job -from google.cloud.aiplatform_v1beta1.types import ( - tuning_job as gca_tuning_job, -) +from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/client.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/client.py index 5948936048..7d5cc307ee 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/client.py @@ -63,16 +63,12 @@ from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.gen_ai_tuning_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.gen_ai_tuning_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import genai_tuning_service from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import tuning_job -from google.cloud.aiplatform_v1beta1.types import ( - tuning_job as gca_tuning_job, -) +from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore @@ -725,11 +721,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = GenAiTuningServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + GenAiTuningServiceClient._read_environment_variables() + ) self._client_cert_source = GenAiTuningServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/base.py index cd86b03b90..868c0af27e 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/base.py @@ -30,9 +30,7 @@ from google.cloud.aiplatform_v1beta1.types import genai_tuning_service from google.cloud.aiplatform_v1beta1.types import tuning_job -from google.cloud.aiplatform_v1beta1.types import ( - tuning_job as gca_tuning_job, -) +from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import 
policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc.py index 8908381cc2..d5e777abed 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc.py @@ -33,9 +33,7 @@ from google.cloud.aiplatform_v1beta1.types import genai_tuning_service from google.cloud.aiplatform_v1beta1.types import tuning_job -from google.cloud.aiplatform_v1beta1.types import ( - tuning_job as gca_tuning_job, -) +from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc_asyncio.py index 8ddef1785a..38a12f54fe 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/grpc_asyncio.py @@ -36,9 +36,7 @@ from google.cloud.aiplatform_v1beta1.types import genai_tuning_service from google.cloud.aiplatform_v1beta1.types import tuning_job -from google.cloud.aiplatform_v1beta1.types import ( - tuning_job as gca_tuning_job, -) +from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py index 0b8ee6cf1a..d2bbc8772a 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py @@ -39,9 +39,7 @@ from google.cloud.aiplatform_v1beta1.types import genai_tuning_service from google.cloud.aiplatform_v1beta1.types import tuning_job -from google.cloud.aiplatform_v1beta1.types import ( - tuning_job as gca_tuning_job, -) +from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job from google.protobuf import empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest_asyncio.py index b35c2e8c95..d8be7a4cf6 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest_asyncio.py @@ -52,9 +52,7 @@ from google.cloud.aiplatform_v1beta1.types import genai_tuning_service from google.cloud.aiplatform_v1beta1.types import tuning_job -from google.cloud.aiplatform_v1beta1.types import ( - tuning_job as gca_tuning_job, -) +from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job from google.protobuf import empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore diff --git 
a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest_base.py index 2aa9668dea..f92eedb370 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest_base.py @@ -29,9 +29,7 @@ from google.cloud.aiplatform_v1beta1.types import genai_tuning_service from google.cloud.aiplatform_v1beta1.types import tuning_job -from google.cloud.aiplatform_v1beta1.types import ( - tuning_job as gca_tuning_job, -) +from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job from google.protobuf import empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py index df06d158c3..9e67502d72 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -47,14 +47,10 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import ( - index_endpoint as gca_index_endpoint, -) +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1beta1.types import index_endpoint_service from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import service_networking diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py index 14d0f971f7..e48683072b 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py @@ -63,14 +63,10 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import ( - index_endpoint as gca_index_endpoint, -) +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1beta1.types import index_endpoint_service from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import service_networking @@ -681,11 +677,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = IndexEndpointServiceClient._read_environment_variables() + 
self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + IndexEndpointServiceClient._read_environment_variables() + ) self._client_cert_source = IndexEndpointServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py index 2bc1865fae..ea9f5d1cd1 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py @@ -29,9 +29,7 @@ import google.protobuf from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import ( - index_endpoint as gca_index_endpoint, -) +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1beta1.types import index_endpoint_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py index b1b486b84c..309060074d 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py @@ -32,9 +32,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import ( - index_endpoint as gca_index_endpoint, -) +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1beta1.types import index_endpoint_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py index c3495197ee..5e314ed1f4 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -35,9 +35,7 @@ from grpc.experimental import aio # type: ignore from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import ( - index_endpoint as gca_index_endpoint, -) +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1beta1.types import index_endpoint_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py index 2680f1b9ae..06df05fb1f 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py @@ -38,9 +38,7 @@ from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import ( - index_endpoint as gca_index_endpoint, -) 
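[Annotation] The import hunks through this stretch are all the same mechanical change: a `from ... import (name as alias,)` statement that the formatter had wrapped in parentheses is collapsed onto a single line. This is consistent with Black's line-length limit growing (79 to 88 columns is an assumption here; the diff itself does not state the configuration). The two spellings are interchangeable, as this minimal, stdlib-only sketch shows:

    # Minimal, stdlib-only sketch: both import spellings bind the identical
    # module object, so collapsing the parenthesized form is layout-only.
    from collections import abc as collections_abc  # one-line form
    from collections import (  # parenthesized form, used when the line is long
        abc as wrapped_collections_abc,
    )

    assert collections_abc is wrapped_collections_abc
    print("same module:", collections_abc.__name__)  # same module: collections.abc

Because only layout changes, aliases such as `gca_index_endpoint` resolve to the same module object either way.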
+from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1beta1.types import index_endpoint_service from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest_asyncio.py index 2ac43df008..4fa9c57b56 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest_asyncio.py @@ -51,9 +51,7 @@ from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import ( - index_endpoint as gca_index_endpoint, -) +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1beta1.types import index_endpoint_service from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest_base.py index 4377ec5d1a..2adcef0b21 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest_base.py @@ -28,9 +28,7 @@ from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import ( - index_endpoint as gca_index_endpoint, -) +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1beta1.types import index_endpoint_service from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_service/client.py index 475e506a1c..a1560439c1 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/client.py @@ -652,11 +652,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = IndexServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + IndexServiceClient._read_environment_variables() + ) self._client_cert_source = IndexServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index 9be543b172..b61c802545 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -54,9 +54,7 @@ ) from google.cloud.aiplatform_v1beta1.types import completion_stats from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - custom_job as gca_custom_job, -) +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import ( data_labeling_job as gca_data_labeling_job, @@ -71,12 +69,8 @@ from 
google.cloud.aiplatform_v1beta1.types import job_service from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import ( - manual_batch_tuning_parameters, -) -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, ) @@ -4351,18 +4345,17 @@ async def sample_update_model_deployment_monitoring_job(): Updatable fields: - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . - or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/google/cloud/aiplatform_v1beta1/services/job_service/client.py index e88536d10b..988ef7dbfc 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -70,9 +70,7 @@ ) from google.cloud.aiplatform_v1beta1.types import completion_stats from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - custom_job as gca_custom_job, -) +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import ( data_labeling_job as gca_data_labeling_job, @@ -87,12 +85,8 @@ from google.cloud.aiplatform_v1beta1.types import job_service from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import ( - manual_batch_tuning_parameters, -) -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, ) @@ -1040,11 +1034,9 @@ def __init__( 
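[Annotation] Every `client.py` `__init__` hunk, including the one below, flattens a parenthesized tuple-unpacking target list and moves the parentheses to the right-hand side call instead. A self-contained sketch of the before and after; `_read_environment_variables` is stubbed with made-up values so the example runs anywhere:

    def _read_environment_variables():
        # Hypothetical stub for the real classmethod; the values are made up.
        return False, "auto", "googleapis.com"

    # Old layout: parenthesized target list spread over several lines.
    (
        use_client_cert,
        use_mtls_endpoint,
        universe_domain_env,
    ) = _read_environment_variables()

    # New layout: bare targets on one line, the call wrapped instead.
    use_client_cert, use_mtls_endpoint, universe_domain_env = (
        _read_environment_variables()
    )

    # Both assignments are equivalent at runtime; only the layout differs.
    assert (use_client_cert, use_mtls_endpoint, universe_domain_env) == (
        False,
        "auto",
        "googleapis.com",
    )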
universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = JobServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + JobServiceClient._read_environment_variables() + ) self._client_cert_source = JobServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -5075,18 +5067,17 @@ def sample_update_model_deployment_monitoring_job(): Updatable fields: - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . - or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py index 67b31b2f2b..d97cedd8f1 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py @@ -42,9 +42,7 @@ from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, ) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py index a9a1d794ab..10310a411a 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py @@ -33,9 +33,7 @@ batch_prediction_job as gca_batch_prediction_job, ) from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - custom_job as gca_custom_job, -) +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import ( data_labeling_job as gca_data_labeling_job, @@ -45,9 +43,7 @@ 
hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, ) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py index c8c4e5843d..470573d328 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py @@ -36,9 +36,7 @@ batch_prediction_job as gca_batch_prediction_job, ) from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - custom_job as gca_custom_job, -) +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import ( data_labeling_job as gca_data_labeling_job, @@ -48,9 +46,7 @@ hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, ) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py index 1f3514b689..93f8468d4f 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py @@ -39,9 +39,7 @@ batch_prediction_job as gca_batch_prediction_job, ) from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - custom_job as gca_custom_job, -) +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import ( data_labeling_job as gca_data_labeling_job, @@ -51,9 +49,7 @@ hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, ) diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py index 00fdc033ce..db10cf2bdf 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py @@ -42,9 +42,7 @@ batch_prediction_job as gca_batch_prediction_job, ) from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - custom_job as gca_custom_job, -) +from 
google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import ( data_labeling_job as gca_data_labeling_job, @@ -54,9 +52,7 @@ hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, ) @@ -5571,11 +5567,10 @@ def __call__( resp = self._interceptor.post_create_hyperparameter_tuning_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_create_hyperparameter_tuning_job_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_create_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5672,11 +5667,10 @@ def __call__( _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_create_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_create_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -5746,11 +5740,10 @@ def __call__( resp = self._interceptor.post_create_model_deployment_monitoring_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_create_model_deployment_monitoring_job_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_create_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6512,11 +6505,10 @@ def __call__( resp = self._interceptor.post_delete_hyperparameter_tuning_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_delete_hyperparameter_tuning_job_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_delete_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6606,11 +6598,10 @@ def __call__( _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_delete_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_delete_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -6669,11 +6660,10 @@ def __call__( resp = self._interceptor.post_delete_model_deployment_monitoring_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_delete_model_deployment_monitoring_job_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_delete_model_deployment_monitoring_job_with_metadata( + resp, 
response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -7446,11 +7436,10 @@ def __call__( resp = self._interceptor.post_get_hyperparameter_tuning_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_get_hyperparameter_tuning_job_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_get_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -7546,11 +7535,10 @@ def __call__( _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_get_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_get_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -7615,11 +7603,10 @@ def __call__( resp = self._interceptor.post_get_model_deployment_monitoring_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_get_model_deployment_monitoring_job_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_get_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -8535,11 +8522,10 @@ def __call__( resp = self._interceptor.post_list_hyperparameter_tuning_jobs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_list_hyperparameter_tuning_jobs_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_list_hyperparameter_tuning_jobs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -8632,11 +8618,10 @@ def __call__( _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_list_model_deployment_monitoring_jobs( - request, metadata + request, metadata = ( + self._interceptor.pre_list_model_deployment_monitoring_jobs( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_transcoded_request( http_options, request @@ -8697,11 +8682,10 @@ def __call__( resp = self._interceptor.post_list_model_deployment_monitoring_jobs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_list_model_deployment_monitoring_jobs_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_list_model_deployment_monitoring_jobs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -9093,11 +9077,10 @@ def __call__( _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_pause_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_pause_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -9214,11 +9197,10 @@ def __call__( 
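[Annotation] The surrounding rest.py hunks reformat the interceptor hand-off into one consistent shape: `request, metadata = (self._interceptor.pre_*(...))` before the HTTP call and `resp, _ = (self._interceptor.post_*_with_metadata(...))` after it. The sketch below mirrors that shape with placeholder names; it is not the real transport API, just a runnable outline of the flow:

    class _Interceptor:
        # Placeholder hooks mirroring the pre_* / post_*_with_metadata shape.
        def pre_call(self, request, metadata):
            # A hook may rewrite the request or metadata before the round trip.
            return request, metadata + [("x-example-header", "1")]

        def post_call_with_metadata(self, resp, response_metadata):
            # Returns the (possibly replaced) response plus its metadata; call
            # sites that only need the response unpack it as `resp, _ = (...)`.
            return resp, response_metadata

    interceptor = _Interceptor()
    request, metadata = (
        interceptor.pre_call({"name": "projects/p/jobs/123"}, [])
    )
    resp, _ = (
        interceptor.post_call_with_metadata({"state": "JOB_STATE_SUCCEEDED"}, metadata)
    )
    print(resp)  # {'state': 'JOB_STATE_SUCCEEDED'}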
_BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_resume_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_resume_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -9343,11 +9325,10 @@ def __call__( _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies( - request, metadata + request, metadata = ( + self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_transcoded_request( http_options, request @@ -9419,11 +9400,10 @@ def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_search_model_deployment_monitoring_stats_anomalies_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_search_model_deployment_monitoring_stats_anomalies_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -9516,11 +9496,10 @@ def __call__( _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_update_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_update_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -9584,11 +9563,10 @@ def __call__( resp = self._interceptor.post_update_model_deployment_monitoring_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_update_model_deployment_monitoring_job_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_update_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest_asyncio.py index bf15125baa..b106a093a7 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest_asyncio.py @@ -55,9 +55,7 @@ batch_prediction_job as gca_batch_prediction_job, ) from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - custom_job as gca_custom_job, -) +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import ( data_labeling_job as gca_data_labeling_job, @@ -67,9 +65,7 @@ hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from 
google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, ) @@ -2861,11 +2857,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseCancelHyperparameterTuningJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_cancel_hyperparameter_tuning_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_cancel_hyperparameter_tuning_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseCancelHyperparameterTuningJob._get_transcoded_request( http_options, request @@ -3199,11 +3194,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_batch_prediction_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_batch_prediction_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_batch_prediction_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3539,11 +3533,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_data_labeling_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_data_labeling_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_data_labeling_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3638,11 +3631,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseCreateHyperparameterTuningJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_hyperparameter_tuning_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_hyperparameter_tuning_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseCreateHyperparameterTuningJob._get_transcoded_request( http_options, request @@ -3713,11 +3705,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_hyperparameter_tuning_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_hyperparameter_tuning_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3817,11 +3808,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -3898,11 +3888,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await 
self._interceptor.post_create_model_deployment_monitoring_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4230,11 +4219,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_batch_prediction_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_batch_prediction_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_batch_prediction_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4551,11 +4539,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_data_labeling_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_data_labeling_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_data_labeling_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4646,11 +4633,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseDeleteHyperparameterTuningJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_delete_hyperparameter_tuning_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_hyperparameter_tuning_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseDeleteHyperparameterTuningJob._get_transcoded_request( http_options, request @@ -4716,11 +4702,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_hyperparameter_tuning_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_hyperparameter_tuning_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4813,11 +4798,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_delete_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -4885,11 +4869,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_model_deployment_monitoring_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5212,11 +5195,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = 
await self._interceptor.post_get_batch_prediction_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_batch_prediction_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_get_batch_prediction_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5637,11 +5619,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseGetHyperparameterTuningJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_get_hyperparameter_tuning_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_get_hyperparameter_tuning_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseGetHyperparameterTuningJob._get_transcoded_request( http_options, request @@ -5707,11 +5688,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_hyperparameter_tuning_job(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_hyperparameter_tuning_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_get_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5808,11 +5788,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_get_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_get_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -5882,11 +5861,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_model_deployment_monitoring_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_get_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6365,11 +6343,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_batch_prediction_jobs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_batch_prediction_jobs_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_batch_prediction_jobs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6688,11 +6665,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_data_labeling_jobs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_data_labeling_jobs_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_data_labeling_jobs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6784,11 +6760,10 @@ 
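[Annotation] The rest_asyncio.py hunks apply the same reformat to awaited hooks: the whole `await` expression simply moves inside the wrapping parentheses, which changes layout only, not evaluation order. A self-contained asyncio sketch with a placeholder hook name:

    import asyncio

    async def post_list_jobs_with_metadata(resp, response_metadata):
        # Placeholder for an async post-*_with_metadata interceptor hook.
        return resp, response_metadata

    async def main():
        response_metadata = [("content-type", "application/json")]
        resp, _ = (
            await post_list_jobs_with_metadata({"jobs": []}, response_metadata)
        )
        print(resp)  # {'jobs': []}

    asyncio.run(main())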
async def __call__( _BaseJobServiceRestTransport._BaseListHyperparameterTuningJobs._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_hyperparameter_tuning_jobs( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_hyperparameter_tuning_jobs( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseListHyperparameterTuningJobs._get_transcoded_request( http_options, request @@ -6854,11 +6829,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_hyperparameter_tuning_jobs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_hyperparameter_tuning_jobs_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_hyperparameter_tuning_jobs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6954,11 +6928,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_model_deployment_monitoring_jobs( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_model_deployment_monitoring_jobs( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_transcoded_request( http_options, request @@ -7026,11 +6999,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_model_deployment_monitoring_jobs_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_model_deployment_monitoring_jobs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -7441,11 +7413,10 @@ async def __call__( _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_pause_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_pause_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -7570,11 +7541,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_resume_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_resume_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -7705,11 +7675,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies( - request, metadata + request, metadata = ( + await self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_transcoded_request( 
http_options, request @@ -7786,11 +7755,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_search_model_deployment_monitoring_stats_anomalies_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_search_model_deployment_monitoring_stats_anomalies_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -7886,11 +7854,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -7963,11 +7930,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_model_deployment_monitoring_job_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest_base.py index 5ae160b8d4..ff44eeea67 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest_base.py @@ -32,9 +32,7 @@ batch_prediction_job as gca_batch_prediction_job, ) from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - custom_job as gca_custom_job, -) +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import ( data_labeling_job as gca_data_labeling_job, @@ -44,9 +42,7 @@ hyperparameter_tuning_job as gca_hyperparameter_tuning_job, ) from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, ) diff --git a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/client.py b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/client.py index 5d1175b0e9..00702ec2a7 100644 --- a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/client.py @@ -623,11 +623,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = LlmUtilityServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + LlmUtilityServiceClient._read_environment_variables() + ) self._client_cert_source = 
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1beta1/services/match_service/client.py b/google/cloud/aiplatform_v1beta1/services/match_service/client.py
index e64c463366..ad773e1d1c 100644
--- a/google/cloud/aiplatform_v1beta1/services/match_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/match_service/client.py
@@ -619,11 +619,9 @@ def __init__(

         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = MatchServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            MatchServiceClient._read_environment_variables()
+        )
         self._client_cert_source = MatchServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1beta1/services/memory_bank_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/memory_bank_service/async_client.py
index 6619f739da..bcfff32a29 100644
--- a/google/cloud/aiplatform_v1beta1/services/memory_bank_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/memory_bank_service/async_client.py
@@ -47,9 +47,7 @@
 from google.api_core import operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1beta1.services.memory_bank_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.memory_bank_service import pagers
 from google.cloud.aiplatform_v1beta1.types import memory_bank
 from google.cloud.aiplatform_v1beta1.types import memory_bank_service
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/memory_bank_service/client.py b/google/cloud/aiplatform_v1beta1/services/memory_bank_service/client.py
index bab5cafe7a..7437b3f228 100644
--- a/google/cloud/aiplatform_v1beta1/services/memory_bank_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/memory_bank_service/client.py
@@ -63,9 +63,7 @@
 from google.api_core import operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1beta1.services.memory_bank_service import (
-    pagers,
-)
+from google.cloud.aiplatform_v1beta1.services.memory_bank_service import pagers
 from google.cloud.aiplatform_v1beta1.types import memory_bank
 from google.cloud.aiplatform_v1beta1.types import memory_bank_service
 from google.cloud.location import locations_pb2  # type: ignore
@@ -680,11 +678,9 @@ def __init__(

         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = MemoryBankServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            MemoryBankServiceClient._read_environment_variables()
+        )
         self._client_cert_source = MemoryBankServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py
index db4c94e9b3..f207683e87 100644
--- a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py
@@ -58,14 +58,10 @@
 from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
 from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
 from google.cloud.aiplatform_v1beta1.types import metadata_schema
-from google.cloud.aiplatform_v1beta1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1beta1.types import metadata_service
 from google.cloud.aiplatform_v1beta1.types import metadata_store
-from google.cloud.aiplatform_v1beta1.types import (
-    metadata_store as gca_metadata_store,
-)
+from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store
 from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py
index 24220d50ad..4c704b2a53 100644
--- a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py
@@ -74,14 +74,10 @@
 from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
 from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
 from google.cloud.aiplatform_v1beta1.types import metadata_schema
-from google.cloud.aiplatform_v1beta1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1beta1.types import metadata_service
 from google.cloud.aiplatform_v1beta1.types import metadata_store
-from google.cloud.aiplatform_v1beta1.types import (
-    metadata_store as gca_metadata_store,
-)
+from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store
 from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
 from google.cloud.location import locations_pb2  # type: ignore
 from google.iam.v1 import iam_policy_pb2  # type: ignore
@@ -741,11 +737,9 @@ def __init__(

         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = MetadataServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            MetadataServiceClient._read_environment_variables()
+        )
         self._client_cert_source = MetadataServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py
index 76eed42cef..6e99d47b3a 100644
--- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py
+++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py
@@ -36,9 +36,7 @@
 from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
 from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
 from google.cloud.aiplatform_v1beta1.types import metadata_schema
-from google.cloud.aiplatform_v1beta1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1beta1.types import metadata_service
 from google.cloud.aiplatform_v1beta1.types import metadata_store
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py
index d216bd8fd7..5a41aa50b7 100644
--- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py
+++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py
@@ -39,9 +39,7 @@
 from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
 from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
 from google.cloud.aiplatform_v1beta1.types import metadata_schema
-from google.cloud.aiplatform_v1beta1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1beta1.types import metadata_service
 from google.cloud.aiplatform_v1beta1.types import metadata_store
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py
index 5d5403a657..0feeb9d608 100644
--- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py
@@ -42,9 +42,7 @@
 from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
 from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
 from google.cloud.aiplatform_v1beta1.types import metadata_schema
-from google.cloud.aiplatform_v1beta1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1beta1.types import metadata_service
 from google.cloud.aiplatform_v1beta1.types import metadata_store
 from google.cloud.location import locations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py
index a6d6031750..f814b56b46 100644
--- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py
@@ -45,9 +45,7 @@
 from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
 from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
 from google.cloud.aiplatform_v1beta1.types import metadata_schema
-from google.cloud.aiplatform_v1beta1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1beta1.types import metadata_service
 from google.cloud.aiplatform_v1beta1.types import metadata_store
 from google.longrunning import operations_pb2  # type: ignore
@@ -4491,11 +4489,10 @@ def __call__(
                 _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_add_context_artifacts_and_executions(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_add_context_artifacts_and_executions(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_transcoded_request(
                 http_options, request
@@ -4561,11 +4558,10 @@ def __call__(
             resp = self._interceptor.post_add_context_artifacts_and_executions(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_add_context_artifacts_and_executions_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_add_context_artifacts_and_executions_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -8356,11 +8352,10 @@ def __call__(
             resp = self._interceptor.post_query_artifact_lineage_subgraph(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_query_artifact_lineage_subgraph_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_query_artifact_lineage_subgraph_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -8516,11 +8511,10 @@ def __call__(
             resp = self._interceptor.post_query_context_lineage_subgraph(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_query_context_lineage_subgraph_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_query_context_lineage_subgraph_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -8612,11 +8606,10 @@ def __call__(
                 _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_query_execution_inputs_and_outputs(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_query_execution_inputs_and_outputs(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_transcoded_request(
                 http_options, request
@@ -8677,11 +8670,10 @@ def __call__(
             resp = self._interceptor.post_query_execution_inputs_and_outputs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_query_execution_inputs_and_outputs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_query_execution_inputs_and_outputs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest_asyncio.py
index bff3733be9..e90690db52 100644
--- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest_asyncio.py
@@ -58,9 +58,7 @@
 from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
 from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
 from google.cloud.aiplatform_v1beta1.types import metadata_schema
-from google.cloud.aiplatform_v1beta1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1beta1.types import metadata_service
 from google.cloud.aiplatform_v1beta1.types import metadata_store
 from google.longrunning import operations_pb2  # type: ignore
@@ -2551,11 +2549,10 @@ async def __call__(
                 _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_add_context_artifacts_and_executions(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_add_context_artifacts_and_executions(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_transcoded_request(
                 http_options, request
@@ -2628,11 +2625,10 @@ async def __call__(
                 resp
             )
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_add_context_artifacts_and_executions_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_add_context_artifacts_and_executions_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6594,11 +6590,10 @@ async def __call__(
                 _BaseMetadataServiceRestTransport._BaseQueryArtifactLineageSubgraph._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_query_artifact_lineage_subgraph(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_query_artifact_lineage_subgraph(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseQueryArtifactLineageSubgraph._get_transcoded_request(
                 http_options, request
@@ -6664,11 +6659,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_query_artifact_lineage_subgraph(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_query_artifact_lineage_subgraph_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_query_artifact_lineage_subgraph_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6761,11 +6755,10 @@ async def __call__(
                 _BaseMetadataServiceRestTransport._BaseQueryContextLineageSubgraph._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_query_context_lineage_subgraph(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_query_context_lineage_subgraph(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseQueryContextLineageSubgraph._get_transcoded_request(
                 http_options, request
@@ -6831,11 +6824,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_query_context_lineage_subgraph(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_query_context_lineage_subgraph_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_query_context_lineage_subgraph_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -6930,11 +6922,10 @@ async def __call__(
                 _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_query_execution_inputs_and_outputs(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_query_execution_inputs_and_outputs(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_transcoded_request(
                 http_options, request
@@ -7000,11 +6991,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_query_execution_inputs_and_outputs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_query_execution_inputs_and_outputs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_query_execution_inputs_and_outputs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -7168,11 +7158,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_remove_context_children(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_remove_context_children_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_remove_context_children_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest_base.py
index bfff89a6f0..b7c22332e1 100644
--- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest_base.py
+++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest_base.py
@@ -35,9 +35,7 @@
 from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
 from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
 from google.cloud.aiplatform_v1beta1.types import metadata_schema
-from google.cloud.aiplatform_v1beta1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1beta1.types import metadata_service
 from google.cloud.aiplatform_v1beta1.types import metadata_store
 from google.longrunning import operations_pb2  # type: ignore
diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py
index 4c8c4db4d0..0b00060299 100644
--- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py
@@ -243,40 +243,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
     @staticmethod
     def dataset_path(
         project: str,
-        location: str,
         dataset: str,
     ) -> str:
         """Returns a fully-qualified dataset string."""
-        return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+        return "projects/{project}/datasets/{dataset}".format(
             project=project,
-            location=location,
             dataset=dataset,
         )

     @staticmethod
     def parse_dataset_path(path: str) -> Dict[str, str]:
         """Parses a dataset path into its component segments."""
-        m = re.match(
-            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
-            path,
-        )
+        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
         return m.groupdict() if m else {}

     @staticmethod
     def dataset_path(
         project: str,
+        location: str,
         dataset: str,
     ) -> str:
         """Returns a fully-qualified dataset string."""
-        return "projects/{project}/datasets/{dataset}".format(
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(
"projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod @@ -753,11 +753,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = MigrationServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + MigrationServiceClient._read_environment_variables() + ) self._client_cert_source = MigrationServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest_asyncio.py index 163000c7c0..2b9cae45fc 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest_asyncio.py @@ -743,11 +743,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_migrate_resources(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_migrate_resources_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_migrate_resources_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -910,11 +909,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_search_migratable_resources(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_search_migratable_resources_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_search_migratable_resources_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py index 3e3223daa2..49830e6503 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py @@ -48,9 +48,7 @@ from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.model_garden_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.model_garden_service import pagers from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import model_garden_service from google.cloud.aiplatform_v1beta1.types import publisher_model diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/client.py index ca4cff5d56..b72f32229f 100644 
--- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/client.py @@ -63,9 +63,7 @@ from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.model_garden_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.model_garden_service import pagers from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import model_garden_service from google.cloud.aiplatform_v1beta1.types import publisher_model @@ -690,11 +688,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = ModelGardenServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + ModelGardenServiceClient._read_environment_variables() + ) self._client_cert_source = ModelGardenServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py index 57683c6e28..14257c3d70 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py @@ -3225,11 +3225,10 @@ def __call__( _BaseModelGardenServiceRestTransport._BaseCheckPublisherModelEulaAcceptance._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_check_publisher_model_eula_acceptance( - request, metadata + request, metadata = ( + self._interceptor.pre_check_publisher_model_eula_acceptance( + request, metadata + ) ) transcoded_request = _BaseModelGardenServiceRestTransport._BaseCheckPublisherModelEulaAcceptance._get_transcoded_request( http_options, request @@ -3295,11 +3294,10 @@ def __call__( resp = self._interceptor.post_check_publisher_model_eula_acceptance(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_check_publisher_model_eula_acceptance_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_check_publisher_model_eula_acceptance_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest_asyncio.py index 6373a08df7..5015de3897 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest_asyncio.py @@ -1058,11 +1058,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_accept_publisher_model_eula(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_accept_publisher_model_eula_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_accept_publisher_model_eula_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( 
logging.DEBUG @@ -1159,11 +1158,10 @@ async def __call__( _BaseModelGardenServiceRestTransport._BaseCheckPublisherModelEulaAcceptance._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_check_publisher_model_eula_acceptance( - request, metadata + request, metadata = ( + await self._interceptor.pre_check_publisher_model_eula_acceptance( + request, metadata + ) ) transcoded_request = _BaseModelGardenServiceRestTransport._BaseCheckPublisherModelEulaAcceptance._get_transcoded_request( http_options, request @@ -1236,11 +1234,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_check_publisher_model_eula_acceptance_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_check_publisher_model_eula_acceptance_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py index 370aa12d11..da06742dd1 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py @@ -47,16 +47,12 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import model_monitor -from google.cloud.aiplatform_v1beta1.types import ( - model_monitor as gca_model_monitor, -) +from google.cloud.aiplatform_v1beta1.types import model_monitor as gca_model_monitor from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert from google.cloud.aiplatform_v1beta1.types import model_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py index efd383d661..78840cc141 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py @@ -63,16 +63,12 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import model_monitor -from google.cloud.aiplatform_v1beta1.types import ( - model_monitor as gca_model_monitor, -) +from google.cloud.aiplatform_v1beta1.types import model_monitor as gca_model_monitor from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert from 
 from google.cloud.aiplatform_v1beta1.types import model_monitoring_job
 from google.cloud.aiplatform_v1beta1.types import (
@@ -806,11 +802,9 @@ def __init__(

         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = ModelMonitoringServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            ModelMonitoringServiceClient._read_environment_variables()
+        )
         self._client_cert_source = ModelMonitoringServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py
index acada77b5b..7e5999c4e3 100644
--- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py
@@ -4618,11 +4618,10 @@ def __call__(
             resp = self._interceptor.post_search_model_monitoring_alerts(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_search_model_monitoring_alerts_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_search_model_monitoring_alerts_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4785,11 +4784,10 @@ def __call__(
             resp = self._interceptor.post_search_model_monitoring_stats(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_search_model_monitoring_stats_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_search_model_monitoring_stats_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest_asyncio.py
index 2f99cd70e1..76e80c5896 100644
--- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest_asyncio.py
@@ -1490,11 +1490,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_model_monitoring_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_model_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_model_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1814,11 +1813,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_model_monitoring_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_model_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_model_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2139,11 +2137,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_model_monitoring_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_model_monitoring_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_model_monitoring_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2304,11 +2301,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_model_monitoring_jobs(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_model_monitoring_jobs_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_model_monitoring_jobs_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2564,11 +2560,10 @@ async def __call__(
                 _BaseModelMonitoringServiceRestTransport._BaseSearchModelMonitoringAlerts._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_search_model_monitoring_alerts(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_search_model_monitoring_alerts(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseModelMonitoringServiceRestTransport._BaseSearchModelMonitoringAlerts._get_transcoded_request(
                 http_options, request
@@ -2641,11 +2636,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_search_model_monitoring_alerts(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_search_model_monitoring_alerts_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_search_model_monitoring_alerts_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2740,11 +2734,10 @@ async def __call__(
                 _BaseModelMonitoringServiceRestTransport._BaseSearchModelMonitoringStats._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_search_model_monitoring_stats(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_search_model_monitoring_stats(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseModelMonitoringServiceRestTransport._BaseSearchModelMonitoringStats._get_transcoded_request(
                 http_options, request
@@ -2817,11 +2810,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_search_model_monitoring_stats(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_search_model_monitoring_stats_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_search_model_monitoring_stats_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py
index 4dc6a7c254..3ab5e90aa5 100644
--- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py
@@ -751,11 +751,9 @@ def __init__(

         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = ModelServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            ModelServiceClient._read_environment_variables()
+        )
         self._client_cert_source = ModelServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py
index ab78602bd1..ba05256529 100644
--- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py
@@ -3801,11 +3801,10 @@ def __call__(
                 _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_batch_import_evaluated_annotations(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_batch_import_evaluated_annotations(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_transcoded_request(
                 http_options, request
@@ -3871,11 +3870,10 @@ def __call__(
             resp = self._interceptor.post_batch_import_evaluated_annotations(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_batch_import_evaluated_annotations_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_batch_import_evaluated_annotations_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3969,11 +3967,10 @@ def __call__(
                 _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = self._interceptor.pre_batch_import_model_evaluation_slices(
-                request, metadata
+            request, metadata = (
+                self._interceptor.pre_batch_import_model_evaluation_slices(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_transcoded_request(
                 http_options, request
@@ -4039,11 +4036,10 @@ def __call__(
             resp = self._interceptor.post_batch_import_model_evaluation_slices(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_batch_import_model_evaluation_slices_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_batch_import_model_evaluation_slices_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -5898,11 +5894,10 @@ def __call__(
             resp = self._interceptor.post_list_model_version_checkpoints(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_list_model_version_checkpoints_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_list_model_version_checkpoints_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest_asyncio.py
index 5cf58e9018..26998c443c 100644
--- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest_asyncio.py
@@ -1795,11 +1795,10 @@ async def __call__(
                 _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_batch_import_evaluated_annotations(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_batch_import_evaluated_annotations(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_transcoded_request(
                 http_options, request
@@ -1870,11 +1869,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_batch_import_evaluated_annotations(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_batch_import_evaluated_annotations_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_batch_import_evaluated_annotations_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1971,11 +1969,10 @@ async def __call__(
                 _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_batch_import_model_evaluation_slices(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_batch_import_model_evaluation_slices(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_transcoded_request(
                 http_options, request
@@ -2048,11 +2045,10 @@ async def __call__(
                 resp
             )
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_batch_import_model_evaluation_slices_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_batch_import_model_evaluation_slices_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3191,11 +3187,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_model_evaluation_slice(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_model_evaluation_slice_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_model_evaluation_slice_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3361,11 +3356,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_import_model_evaluation(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_import_model_evaluation_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_import_model_evaluation_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3616,11 +3610,10 @@
                 _BaseModelServiceRestTransport._BaseListModelEvaluationSlices._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_list_model_evaluation_slices(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_list_model_evaluation_slices(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseModelServiceRestTransport._BaseListModelEvaluationSlices._get_transcoded_request(
                 http_options, request
@@ -3686,11 +3679,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_model_evaluation_slices(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_model_evaluation_slices_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_model_evaluation_slices_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3946,11 +3938,10 @@ async def __call__(
                 _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_list_model_version_checkpoints(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_list_model_version_checkpoints(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints._get_transcoded_request(
                 http_options, request
@@ -4016,11 +4007,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_list_model_version_checkpoints(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_list_model_version_checkpoints_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_list_model_version_checkpoints_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4677,11 +4667,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_update_explanation_dataset(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_update_explanation_dataset_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_update_explanation_dataset_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py
index 28f244e870..c1a8d056f7 100644
--- a/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py
@@ -57,16 +57,12 @@
 from google.cloud.aiplatform_v1beta1.types import (
     notebook_execution_job as gca_notebook_execution_job,
 )
-from google.cloud.aiplatform_v1beta1.types import (
-    notebook_idle_shutdown_config,
-)
+from google.cloud.aiplatform_v1beta1.types import notebook_idle_shutdown_config
 from google.cloud.aiplatform_v1beta1.types import notebook_runtime
 from google.cloud.aiplatform_v1beta1.types import (
     notebook_runtime as gca_notebook_runtime,
 )
-from google.cloud.aiplatform_v1beta1.types import (
-    notebook_runtime_template_ref,
-)
+from google.cloud.aiplatform_v1beta1.types import notebook_runtime_template_ref
 from google.cloud.aiplatform_v1beta1.types import notebook_service
 from google.cloud.aiplatform_v1beta1.types import notebook_software_config
 from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
@@ -958,7 +954,7 @@ async def sample_update_notebook_runtime_template():
                 Input format: ``{paths: "${updated_filed}"}``

                 Updatable fields:

-                   - ``encryption_spec.kms_key_name``
+                - ``encryption_spec.kms_key_name``

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py
index 4474109e87..b82c209c75 100644
--- a/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py
@@ -73,16 +73,12 @@
 from google.cloud.aiplatform_v1beta1.types import (
     notebook_execution_job as gca_notebook_execution_job,
 )
-from google.cloud.aiplatform_v1beta1.types import (
-    notebook_idle_shutdown_config,
-)
+from google.cloud.aiplatform_v1beta1.types import notebook_idle_shutdown_config
 from google.cloud.aiplatform_v1beta1.types import notebook_runtime
 from google.cloud.aiplatform_v1beta1.types import (
     notebook_runtime as gca_notebook_runtime,
 )
-from google.cloud.aiplatform_v1beta1.types import (
-    notebook_runtime_template_ref,
-)
+from google.cloud.aiplatform_v1beta1.types import notebook_runtime_template_ref
 from google.cloud.aiplatform_v1beta1.types import notebook_service
 from google.cloud.aiplatform_v1beta1.types import notebook_software_config
 from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
@@ -780,11 +776,9 @@ def __init__(

         universe_domain_opt = getattr(self._client_options, "universe_domain", None)

-        (
-            self._use_client_cert,
-            self._use_mtls_endpoint,
-            self._universe_domain_env,
-        ) = NotebookServiceClient._read_environment_variables()
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = (
+            NotebookServiceClient._read_environment_variables()
+        )
         self._client_cert_source = NotebookServiceClient._get_client_cert_source(
             self._client_options.client_cert_source, self._use_client_cert
         )
@@ -1524,7 +1518,7 @@ def sample_update_notebook_runtime_template():
                 Input format: ``{paths: "${updated_filed}"}``

                 Updatable fields:

-                   - ``encryption_spec.kms_key_name``
+                - ``encryption_spec.kms_key_name``

                 This corresponds to the ``update_mask`` field
                 on the ``request`` instance; if ``request`` is provided, this
diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py
index 17ec073e20..b5750a814d 100644
--- a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py
+++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py
@@ -3812,11 +3812,10 @@ def __call__(
             resp = self._interceptor.post_create_notebook_execution_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_create_notebook_execution_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_create_notebook_execution_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3972,11 +3971,10 @@ def __call__(
             resp = self._interceptor.post_create_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_create_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_create_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4128,11 +4126,10 @@ def __call__(
             resp = self._interceptor.post_delete_notebook_execution_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_delete_notebook_execution_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_delete_notebook_execution_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4434,11 +4431,10 @@ def __call__(
             resp = self._interceptor.post_delete_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_delete_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_delete_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -4908,11 +4904,10 @@ def __call__(
             resp = self._interceptor.post_get_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_get_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_get_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -5376,11 +5371,10 @@ def __call__(
             resp = self._interceptor.post_list_notebook_runtime_templates(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_list_notebook_runtime_templates_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_list_notebook_runtime_templates_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -5856,11 +5850,10 @@ def __call__(
             resp = self._interceptor.post_update_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = self._interceptor.post_update_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                self._interceptor.post_update_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest_asyncio.py
index 8d2ac5fec3..6d309dc061 100644
--- a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest_asyncio.py
+++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest_asyncio.py
@@ -1630,11 +1630,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_assign_notebook_runtime(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_assign_notebook_runtime_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_assign_notebook_runtime_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1726,11 +1725,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseCreateNotebookExecutionJob._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_create_notebook_execution_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_create_notebook_execution_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseCreateNotebookExecutionJob._get_transcoded_request(
                 http_options, request
@@ -1801,11 +1799,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_notebook_execution_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_notebook_execution_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_notebook_execution_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -1899,11 +1896,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseCreateNotebookRuntimeTemplate._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_create_notebook_runtime_template(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_create_notebook_runtime_template(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseCreateNotebookRuntimeTemplate._get_transcoded_request(
                 http_options, request
@@ -1974,11 +1970,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_create_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_create_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_create_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2069,11 +2064,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseDeleteNotebookExecutionJob._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_delete_notebook_execution_job(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_delete_notebook_execution_job(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseDeleteNotebookExecutionJob._get_transcoded_request(
                 http_options, request
@@ -2139,11 +2133,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_notebook_execution_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_notebook_execution_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_notebook_execution_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2300,11 +2293,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_notebook_runtime(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_notebook_runtime_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_notebook_runtime_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2397,11 +2389,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseDeleteNotebookRuntimeTemplate._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_delete_notebook_runtime_template(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_delete_notebook_runtime_template(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseDeleteNotebookRuntimeTemplate._get_transcoded_request(
                 http_options, request
@@ -2467,11 +2458,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_delete_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_delete_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_delete_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2628,11 +2618,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_notebook_execution_job(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_notebook_execution_job_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_notebook_execution_job_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -2891,11 +2880,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseGetNotebookRuntimeTemplate._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_get_notebook_runtime_template(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_get_notebook_runtime_template(
+                    request, metadata
+                )
             )
             transcoded_request = _BaseNotebookServiceRestTransport._BaseGetNotebookRuntimeTemplate._get_transcoded_request(
                 http_options, request
@@ -2961,11 +2949,10 @@ async def __call__(
             json_format.Parse(content, pb_resp, ignore_unknown_fields=True)
             resp = await self._interceptor.post_get_notebook_runtime_template(resp)
             response_metadata = [(k, str(v)) for k, v in response.headers.items()]
-            (
-                resp,
-                _,
-            ) = await self._interceptor.post_get_notebook_runtime_template_with_metadata(
-                resp, response_metadata
+            resp, _ = (
+                await self._interceptor.post_get_notebook_runtime_template_with_metadata(
+                    resp, response_metadata
+                )
             )
             if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                 logging.DEBUG
@@ -3057,11 +3044,10 @@ async def __call__(
                 _BaseNotebookServiceRestTransport._BaseListNotebookExecutionJobs._get_http_options()
             )

-            (
-                request,
-                metadata,
-            ) = await self._interceptor.pre_list_notebook_execution_jobs(
-                request, metadata
+            request, metadata = (
+                await self._interceptor.pre_list_notebook_execution_jobs(
+                    request, metadata
+                )
             )
             transcoded_request =
_BaseNotebookServiceRestTransport._BaseListNotebookExecutionJobs._get_transcoded_request( http_options, request @@ -3127,11 +3113,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_notebook_execution_jobs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_notebook_execution_jobs_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_notebook_execution_jobs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3386,11 +3371,10 @@ async def __call__( _BaseNotebookServiceRestTransport._BaseListNotebookRuntimeTemplates._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_notebook_runtime_templates( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_notebook_runtime_templates( + request, metadata + ) ) transcoded_request = _BaseNotebookServiceRestTransport._BaseListNotebookRuntimeTemplates._get_transcoded_request( http_options, request @@ -3456,11 +3440,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_notebook_runtime_templates(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_notebook_runtime_templates_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_notebook_runtime_templates_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3888,11 +3871,10 @@ async def __call__( _BaseNotebookServiceRestTransport._BaseUpdateNotebookRuntimeTemplate._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_notebook_runtime_template( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_notebook_runtime_template( + request, metadata + ) ) transcoded_request = _BaseNotebookServiceRestTransport._BaseUpdateNotebookRuntimeTemplate._get_transcoded_request( http_options, request @@ -3963,11 +3945,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_notebook_runtime_template(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_notebook_runtime_template_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_notebook_runtime_template_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4132,11 +4113,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_upgrade_notebook_runtime(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_upgrade_notebook_runtime_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_upgrade_notebook_runtime_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py index 
310e82ef5c..33f7392cfc 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py @@ -47,9 +47,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import persistent_resource diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py index b804458221..96ddf33c92 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py @@ -63,9 +63,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import persistent_resource @@ -729,11 +727,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = PersistentResourceServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + PersistentResourceServiceClient._read_environment_variables() + ) self._client_cert_source = ( PersistentResourceServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest_asyncio.py index 09733b8169..b5d14de7cf 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest_asyncio.py @@ -1001,11 +1001,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_persistent_resource(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_persistent_resource_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_persistent_resource_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1165,11 +1164,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_persistent_resource(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await 
self._interceptor.post_delete_persistent_resource_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_persistent_resource_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1330,11 +1328,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_persistent_resource(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_persistent_resource_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_get_persistent_resource_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1496,11 +1493,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_persistent_resources(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_persistent_resources_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_persistent_resources_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1668,11 +1664,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_reboot_persistent_resource(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_reboot_persistent_resource_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_reboot_persistent_resource_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1838,11 +1833,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_persistent_resource(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_persistent_resource_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_persistent_resource_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index f014098009..52e3cbff33 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -52,9 +52,7 @@ from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import ( - pipeline_job as gca_pipeline_job, -) +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import pipeline_state from google.cloud.aiplatform_v1beta1.types import service_networking diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py 
b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py index 9ffde922c8..c134a22a10 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -68,9 +68,7 @@ from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import ( - pipeline_job as gca_pipeline_job, -) +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import pipeline_state from google.cloud.aiplatform_v1beta1.types import service_networking @@ -845,11 +843,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = PipelineServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + PipelineServiceClient._read_environment_variables() + ) self._client_cert_source = PipelineServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py index 1f5d55f3a6..7e362ddcbd 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py @@ -29,9 +29,7 @@ import google.protobuf from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import ( - pipeline_job as gca_pipeline_job, -) +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import training_pipeline from google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py index 2bfeb826d2..9b3e3ba504 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -32,9 +32,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import ( - pipeline_job as gca_pipeline_job, -) +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import training_pipeline from google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py index 14d0f062da..e2637e210e 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py @@ -35,9 +35,7 @@ from grpc.experimental import aio # type: ignore from 
google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import ( - pipeline_job as gca_pipeline_job, -) +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import training_pipeline from google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py index d49c1686c9..16ef2e99e6 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py @@ -38,9 +38,7 @@ from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import ( - pipeline_job as gca_pipeline_job, -) +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import training_pipeline from google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest_asyncio.py index 2410fe0065..df13d84856 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest_asyncio.py @@ -51,9 +51,7 @@ from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import ( - pipeline_job as gca_pipeline_job, -) +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import training_pipeline from google.cloud.aiplatform_v1beta1.types import ( @@ -1305,11 +1303,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_cancel_pipeline_jobs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_cancel_pipeline_jobs_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_cancel_pipeline_jobs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1473,11 +1470,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_delete_pipeline_jobs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_delete_pipeline_jobs_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_delete_pipeline_jobs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -2052,11 +2048,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_training_pipeline(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_training_pipeline_with_metadata( - resp, 
response_metadata + resp, _ = ( + await self._interceptor.post_create_training_pipeline_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -2373,11 +2368,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_training_pipeline(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_training_pipeline_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_training_pipeline_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3016,11 +3010,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_training_pipelines(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_training_pipelines_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_training_pipelines_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest_base.py index d3329c2031..58dd5c2167 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest_base.py @@ -28,9 +28,7 @@ from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import ( - pipeline_job as gca_pipeline_job, -) +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import training_pipeline from google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index b18e5e74c5..c61938ce46 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -477,13 +477,13 @@ async def raw_predict( The response includes the following HTTP headers: - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that - served this prediction. + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - that served this prediction. + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. .. 
code-block:: python diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index f4a49a3f44..975f0dbb7c 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -719,11 +719,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = PredictionServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + PredictionServiceClient._read_environment_variables() + ) self._client_cert_source = PredictionServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -1017,13 +1015,13 @@ def raw_predict( The response includes the following HTTP headers: - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that - served this prediction. + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - that served this prediction. + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. .. code-block:: python diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index 7df86baea8..2b29a8e12e 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -363,13 +363,13 @@ def raw_predict( The response includes the following HTTP headers: - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that - served this prediction. + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - that served this prediction. + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. Returns: Callable[[~.RawPredictRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py index 788cdb1a09..9afe0a5e8a 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -374,13 +374,13 @@ def raw_predict( The response includes the following HTTP headers: - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that - served this prediction. + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. 
- - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - that served this prediction. + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. Returns: Callable[[~.RawPredictRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py index d4b81203a6..4882e601a2 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py @@ -1242,6 +1242,22 @@ def __call__( resp, _ = self._interceptor.post_chat_completions_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.PredictionServiceClient.chat_completions", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.PredictionService", + "rpcName": "ChatCompletions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CountTokens( @@ -2536,6 +2552,22 @@ def __call__( resp, _ = self._interceptor.post_server_streaming_predict_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.PredictionServiceClient.server_streaming_predict", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.PredictionService", + "rpcName": "ServerStreamingPredict", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _StreamDirectPredict( @@ -2711,6 +2743,22 @@ def __call__( resp, _ = self._interceptor.post_stream_generate_content_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.PredictionServiceClient.stream_generate_content", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.PredictionService", + "rpcName": "StreamGenerateContent", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _StreamingPredict( @@ -2930,6 +2978,22 @@ def __call__( resp, _ = self._interceptor.post_stream_raw_predict_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.PredictionServiceClient.stream_raw_predict", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.PredictionService", + "rpcName": "StreamRawPredict", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest_asyncio.py 
b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest_asyncio.py index 9ab121c218..9a575189f4 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest_asyncio.py @@ -1382,6 +1382,23 @@ async def __call__( resp, _ = await self._interceptor.post_chat_completions_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.chat_completions", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.PredictionService", + "rpcName": "ChatCompletions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp class _CountTokens( @@ -2731,12 +2748,28 @@ async def __call__( ) resp = await self._interceptor.post_server_streaming_predict(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_server_streaming_predict_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_server_streaming_predict_with_metadata( + resp, response_metadata + ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.server_streaming_predict", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.PredictionService", + "rpcName": "ServerStreamingPredict", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp class _StreamDirectPredict( @@ -2911,12 +2944,28 @@ async def __call__( ) resp = await self._interceptor.post_stream_generate_content(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_stream_generate_content_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_stream_generate_content_with_metadata( + resp, response_metadata + ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.stream_generate_content", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.PredictionService", + "rpcName": "StreamGenerateContent", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp class _StreamingPredict( @@ -3142,6 +3191,23 @@ async def __call__( resp, _ = await self._interceptor.post_stream_raw_predict_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.stream_raw_predict", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.PredictionService", + 
"rpcName": "StreamRawPredict", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp @property diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py index 2d6a85629e..1ccdf9388f 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py @@ -48,9 +48,7 @@ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - reasoning_engine_execution_service, -) +from google.cloud.aiplatform_v1beta1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/client.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/client.py index 693c116b7c..f736fc7761 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/client.py @@ -63,9 +63,7 @@ _LOGGER = std_logging.getLogger(__name__) from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - reasoning_engine_execution_service, -) +from google.cloud.aiplatform_v1beta1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore @@ -641,11 +639,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = ReasoningEngineExecutionServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + ReasoningEngineExecutionServiceClient._read_environment_variables() + ) self._client_cert_source = ( ReasoningEngineExecutionServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/base.py index 5d2c7098ae..36b0280d71 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/base.py @@ -28,9 +28,7 @@ import google.protobuf from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - reasoning_engine_execution_service, -) +from google.cloud.aiplatform_v1beta1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc.py 
b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc.py index c11b50b41d..ca817ed13e 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc.py @@ -31,9 +31,7 @@ import proto # type: ignore from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - reasoning_engine_execution_service, -) +from google.cloud.aiplatform_v1beta1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py index d71359bad8..ff17475aec 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/grpc_asyncio.py @@ -34,9 +34,7 @@ from grpc.experimental import aio # type: ignore from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - reasoning_engine_execution_service, -) +from google.cloud.aiplatform_v1beta1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py index 6f09efe78b..3e95851dd4 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py @@ -37,9 +37,7 @@ from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - reasoning_engine_execution_service, -) +from google.cloud.aiplatform_v1beta1.types import reasoning_engine_execution_service from google.longrunning import operations_pb2 # type: ignore @@ -886,12 +884,27 @@ def __call__( resp = self._interceptor.post_stream_query_reasoning_engine(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_stream_query_reasoning_engine_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_stream_query_reasoning_engine_with_metadata( + resp, response_metadata + ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.ReasoningEngineExecutionServiceClient.stream_query_reasoning_engine", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.ReasoningEngineExecutionService", + "rpcName": "StreamQueryReasoningEngine", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git 
a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest_asyncio.py index 5bc23fb559..135b3aff4d 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest_asyncio.py @@ -49,9 +49,7 @@ from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - reasoning_engine_execution_service, -) +from google.cloud.aiplatform_v1beta1.types import reasoning_engine_execution_service from google.longrunning import operations_pb2 # type: ignore @@ -895,11 +893,10 @@ async def __call__( _BaseReasoningEngineExecutionServiceRestTransport._BaseStreamQueryReasoningEngine._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_stream_query_reasoning_engine( - request, metadata + request, metadata = ( + await self._interceptor.pre_stream_query_reasoning_engine( + request, metadata + ) ) transcoded_request = _BaseReasoningEngineExecutionServiceRestTransport._BaseStreamQueryReasoningEngine._get_transcoded_request( http_options, request @@ -969,12 +966,28 @@ async def __call__( ) resp = await self._interceptor.post_stream_query_reasoning_engine(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_stream_query_reasoning_engine_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_stream_query_reasoning_engine_with_metadata( + resp, response_metadata + ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.ReasoningEngineExecutionServiceAsyncClient.stream_query_reasoning_engine", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.ReasoningEngineExecutionService", + "rpcName": "StreamQueryReasoningEngine", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp @property diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest_base.py index 5ce4bdf1d5..0f4c652851 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest_base.py @@ -28,9 +28,7 @@ from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - reasoning_engine_execution_service, -) +from google.cloud.aiplatform_v1beta1.types import reasoning_engine_execution_service from google.longrunning import operations_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py index 5f373c2746..1708bed0ca 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py @@ -47,9 +47,7 @@ from google.api_core import operation as 
gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import reasoning_engine diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/client.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/client.py index 3c4c5e107b..1bab3f8abe 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/client.py @@ -63,9 +63,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import reasoning_engine @@ -682,11 +680,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = ReasoningEngineServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + ReasoningEngineServiceClient._read_environment_variables() + ) self._client_cert_source = ReasoningEngineServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest_asyncio.py index c23c36df99..7ee9e38ddc 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest_asyncio.py @@ -935,11 +935,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_reasoning_engine(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_reasoning_engine_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_reasoning_engine_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1098,11 +1097,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_reasoning_engine(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_reasoning_engine_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_reasoning_engine_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -1589,11 +1587,10 @@ async def __call__( json_format.Parse(content, pb_resp, 
ignore_unknown_fields=True) resp = await self._interceptor.post_update_reasoning_engine(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_reasoning_engine_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_reasoning_engine_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py index af90b4f682..1b7b400427 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py @@ -1153,9 +1153,9 @@ async def sample_update_schedule(): Required. The Schedule which replaces the resource on the server. The following restrictions will be applied: - - The scheduled request type cannot be changed. - - The non-empty fields cannot be unset. - - The output_only fields will be ignored if specified. + - The scheduled request type cannot be changed. + - The non-empty fields cannot be unset. + - The output_only fields will be ignored if specified. This corresponds to the ``schedule`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py index ccae8c4ec7..0958ead2fa 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py @@ -992,11 +992,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = ScheduleServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + ScheduleServiceClient._read_environment_variables() + ) self._client_cert_source = ScheduleServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -1899,9 +1897,9 @@ def sample_update_schedule(): Required. The Schedule which replaces the resource on the server. The following restrictions will be applied: - - The scheduled request type cannot be changed. - - The non-empty fields cannot be unset. - - The output_only fields will be ignored if specified. + - The scheduled request type cannot be changed. + - The non-empty fields cannot be unset. + - The output_only fields will be ignored if specified. 
This corresponds to the ``schedule`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/session_service/client.py b/google/cloud/aiplatform_v1beta1/services/session_service/client.py index 07b6913c8c..5f57803d6b 100644 --- a/google/cloud/aiplatform_v1beta1/services/session_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/session_service/client.py @@ -679,11 +679,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = SessionServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + SessionServiceClient._read_environment_variables() + ) self._client_cert_source = SessionServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py index 4946259fb1..65b178500d 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -47,14 +47,10 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import ( - specialist_pool as gca_specialist_pool, -) +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool from google.cloud.aiplatform_v1beta1.types import specialist_pool_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py index 0ac040a250..38b53b0b78 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py @@ -63,14 +63,10 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import ( - specialist_pool as gca_specialist_pool, -) +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool from google.cloud.aiplatform_v1beta1.types import specialist_pool_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -642,11 +638,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - 
self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = SpecialistPoolServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + SpecialistPoolServiceClient._read_environment_variables() + ) self._client_cert_source = SpecialistPoolServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py index e1ed0397c8..edf37f4aad 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -49,24 +49,18 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard as gca_tensorboard, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard from google.cloud.aiplatform_v1beta1.types import tensorboard_data from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment from google.cloud.aiplatform_v1beta1.types import ( tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1beta1.types import tensorboard_service from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series from google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py index 98a756e87f..7e4ba1c57f 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py @@ -64,24 +64,18 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard as gca_tensorboard, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard from google.cloud.aiplatform_v1beta1.types import tensorboard_data from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment from google.cloud.aiplatform_v1beta1.types import ( tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1beta1.types import tensorboard_run 
-from google.cloud.aiplatform_v1beta1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1beta1.types import tensorboard_service from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series from google.cloud.aiplatform_v1beta1.types import ( @@ -730,11 +724,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = TensorboardServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + TensorboardServiceClient._read_environment_variables() + ) self._client_cert_source = TensorboardServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py index 1dacf58414..cfd2021f58 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py @@ -34,9 +34,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1beta1.types import tensorboard_service from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series from google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py index f0eba0b08f..70054c234b 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py @@ -37,9 +37,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1beta1.types import tensorboard_service from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series from google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py index ac7f2cfc2f..e08ae30b70 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py @@ -40,9 +40,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1beta1.types import tensorboard_service 
from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series from google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py index 16ed934d49..689cec9cc1 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py @@ -43,9 +43,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1beta1.types import tensorboard_service from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series from google.cloud.aiplatform_v1beta1.types import ( @@ -4504,11 +4502,10 @@ def __call__( resp = self._interceptor.post_batch_create_tensorboard_runs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_batch_create_tensorboard_runs_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_batch_create_tensorboard_runs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4604,11 +4601,10 @@ def __call__( _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_batch_create_tensorboard_time_series( - request, metadata + request, metadata = ( + self._interceptor.pre_batch_create_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -4676,11 +4672,10 @@ def __call__( resp = self._interceptor.post_batch_create_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_batch_create_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_batch_create_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4773,11 +4768,10 @@ def __call__( _BaseTensorboardServiceRestTransport._BaseBatchReadTensorboardTimeSeriesData._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_batch_read_tensorboard_time_series_data( - request, metadata + request, metadata = ( + self._interceptor.pre_batch_read_tensorboard_time_series_data( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseBatchReadTensorboardTimeSeriesData._get_transcoded_request( http_options, request @@ -4840,11 +4834,10 @@ def __call__( resp = self._interceptor.post_batch_read_tensorboard_time_series_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_batch_read_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_batch_read_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5161,11 +5154,10 @@ 
def __call__( resp = self._interceptor.post_create_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_create_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_create_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5489,11 +5481,10 @@ def __call__( resp = self._interceptor.post_create_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_create_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_create_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5797,11 +5788,10 @@ def __call__( resp = self._interceptor.post_delete_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_delete_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_delete_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6103,11 +6093,10 @@ def __call__( resp = self._interceptor.post_delete_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_delete_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_delete_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6199,11 +6188,10 @@ def __call__( _BaseTensorboardServiceRestTransport._BaseExportTensorboardTimeSeriesData._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_export_tensorboard_time_series_data( - request, metadata + request, metadata = ( + self._interceptor.pre_export_tensorboard_time_series_data( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseExportTensorboardTimeSeriesData._get_transcoded_request( http_options, request @@ -6271,11 +6259,10 @@ def __call__( resp = self._interceptor.post_export_tensorboard_time_series_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_export_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_export_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -7675,6 +7662,22 @@ def __call__( resp, _ = self._interceptor.post_read_tensorboard_blob_data_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_blob_data", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "rpcName": "ReadTensorboardBlobData", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp 
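The hunk above gives the synchronous ``ReadTensorboardBlobData`` handler the same post-response DEBUG logging that the other transport methods already emit. A minimal sketch of surfacing these records with stdlib ``logging`` alone, assuming the installed ``google-api-core`` is recent enough that the generated ``CLIENT_LOGGING_SUPPORTED`` gate is true (the handler and format choices are illustrative):

```python
import logging

# The transport only logs when CLIENT_LOGGING_SUPPORTED is true and
# _LOGGER.isEnabledFor(logging.DEBUG) passes, so a root handler plus a
# DEBUG level on the client's logger tree is enough to see the records.
logging.basicConfig(level=logging.INFO, format="%(name)s: %(message)s")
logging.getLogger("google.cloud.aiplatform_v1beta1").setLevel(logging.DEBUG)

# Subsequent ReadTensorboardBlobData calls then emit records such as
# "Received response for ...TensorboardServiceClient.read_tensorboard_blob_data"
# carrying the serviceName/rpcName/httpResponse extras added in this hunk.
```

Note that the async counterpart of this hunk (later in this diff) fills ``"status"`` with a placeholder ``"OK"`` rather than the real status code, as its own ``# need to obtain this properly`` comment concedes.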
class _ReadTensorboardSize( @@ -7960,11 +7963,10 @@ def __call__( resp = self._interceptor.post_read_tensorboard_time_series_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_read_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_read_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -8438,11 +8440,10 @@ def __call__( resp = self._interceptor.post_update_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_update_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_update_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -8766,11 +8767,10 @@ def __call__( resp = self._interceptor.post_update_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_update_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_update_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -8935,11 +8935,10 @@ def __call__( resp = self._interceptor.post_write_tensorboard_experiment_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_write_tensorboard_experiment_data_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_write_tensorboard_experiment_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest_asyncio.py index 924e6543fc..29eaa1c3e9 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest_asyncio.py @@ -56,9 +56,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1beta1.types import tensorboard_service from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series from google.cloud.aiplatform_v1beta1.types import ( @@ -2482,11 +2480,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardRuns._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_batch_create_tensorboard_runs( - request, metadata + request, metadata = ( + await self._interceptor.pre_batch_create_tensorboard_runs( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardRuns._get_transcoded_request( http_options, request @@ -2557,11 +2554,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_batch_create_tensorboard_runs(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_create_tensorboard_runs_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_create_tensorboard_runs_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -2658,11 +2654,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_batch_create_tensorboard_time_series( - request, metadata + request, metadata = ( + await self._interceptor.pre_batch_create_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseBatchCreateTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -2737,11 +2732,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_create_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_create_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -2835,11 +2829,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseBatchReadTensorboardTimeSeriesData._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_batch_read_tensorboard_time_series_data( - request, metadata + request, metadata = ( + await self._interceptor.pre_batch_read_tensorboard_time_series_data( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseBatchReadTensorboardTimeSeriesData._get_transcoded_request( http_options, request @@ -2909,11 +2902,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_batch_read_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_batch_read_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3174,11 +3166,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseCreateTensorboardExperiment._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_tensorboard_experiment( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_tensorboard_experiment( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseCreateTensorboardExperiment._get_transcoded_request( http_options, request @@ -3249,11 +3240,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3517,11 +3507,10 @@ async def __call__( 
_BaseTensorboardServiceRestTransport._BaseCreateTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_tensorboard_time_series( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseCreateTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -3592,11 +3581,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_create_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_create_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -3851,11 +3839,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseDeleteTensorboardExperiment._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_delete_tensorboard_experiment( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_tensorboard_experiment( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseDeleteTensorboardExperiment._get_transcoded_request( http_options, request @@ -3921,11 +3908,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4176,11 +4162,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseDeleteTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_delete_tensorboard_time_series( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseDeleteTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -4246,11 +4231,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_delete_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_delete_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4343,11 +4327,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseExportTensorboardTimeSeriesData._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_export_tensorboard_time_series_data( - request, metadata + request, metadata = ( + await self._interceptor.pre_export_tensorboard_time_series_data( + request, metadata + ) ) transcoded_request = 
_BaseTensorboardServiceRestTransport._BaseExportTensorboardTimeSeriesData._get_transcoded_request( http_options, request @@ -4422,11 +4405,10 @@ async def __call__( resp ) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_export_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_export_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -4748,11 +4730,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_get_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5070,11 +5051,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_get_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_get_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5168,11 +5148,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseListTensorboardExperiments._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_tensorboard_experiments( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_tensorboard_experiments( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseListTensorboardExperiments._get_transcoded_request( http_options, request @@ -5238,11 +5217,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_tensorboard_experiments(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_list_tensorboard_experiments_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_tensorboard_experiments_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5658,11 +5636,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseListTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_tensorboard_time_series( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseListTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -5728,11 +5705,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await 
self._interceptor.post_list_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_list_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -5892,12 +5868,28 @@ async def __call__( ) resp = await self._interceptor.post_read_tensorboard_blob_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_read_tensorboard_blob_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_read_tensorboard_blob_data_with_metadata( + resp, response_metadata + ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_blob_data", + extra={ + "serviceName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "rpcName": "ReadTensorboardBlobData", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp class _ReadTensorboardSize( @@ -6127,11 +6119,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseReadTensorboardTimeSeriesData._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_read_tensorboard_time_series_data( - request, metadata + request, metadata = ( + await self._interceptor.pre_read_tensorboard_time_series_data( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseReadTensorboardTimeSeriesData._get_transcoded_request( http_options, request @@ -6197,11 +6188,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_read_tensorboard_time_series_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_read_tensorboard_time_series_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_read_tensorboard_time_series_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6623,11 +6613,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseUpdateTensorboardExperiment._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_tensorboard_experiment( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_tensorboard_experiment( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseUpdateTensorboardExperiment._get_transcoded_request( http_options, request @@ -6698,11 +6687,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_tensorboard_experiment(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_tensorboard_experiment_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_tensorboard_experiment_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -6966,11 +6954,10 @@ async def __call__( 
_BaseTensorboardServiceRestTransport._BaseUpdateTensorboardTimeSeries._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_tensorboard_time_series( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_tensorboard_time_series( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseUpdateTensorboardTimeSeries._get_transcoded_request( http_options, request @@ -7041,11 +7028,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_tensorboard_time_series(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_tensorboard_time_series_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_tensorboard_time_series_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -7142,11 +7128,10 @@ async def __call__( _BaseTensorboardServiceRestTransport._BaseWriteTensorboardExperimentData._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_write_tensorboard_experiment_data( - request, metadata + request, metadata = ( + await self._interceptor.pre_write_tensorboard_experiment_data( + request, metadata + ) ) transcoded_request = _BaseTensorboardServiceRestTransport._BaseWriteTensorboardExperimentData._get_transcoded_request( http_options, request @@ -7219,11 +7204,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_write_tensorboard_experiment_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_write_tensorboard_experiment_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_write_tensorboard_experiment_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG @@ -7388,11 +7372,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_write_tensorboard_run_data(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_write_tensorboard_run_data_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_write_tensorboard_run_data_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest_base.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest_base.py index 4c59eecaae..53d784edb9 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest_base.py @@ -33,9 +33,7 @@ tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1beta1.types import tensorboard_service from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series from 
google.cloud.aiplatform_v1beta1.types import ( diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py index 1bb62f67f9..5bde90df68 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py @@ -47,9 +47,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import operation as gca_operation diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py index 0ec5d0e15b..3521d51cf7 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py @@ -63,9 +63,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import pagers from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import operation as gca_operation @@ -744,11 +742,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = VertexRagDataServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + VertexRagDataServiceClient._read_environment_variables() + ) self._client_cert_source = VertexRagDataServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest_asyncio.py index f8cfa58d07..06a60ede17 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest_asyncio.py @@ -2969,11 +2969,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_rag_engine_config(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = await self._interceptor.post_update_rag_engine_config_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_update_rag_engine_config_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py index 70306999ac..36896c5815 
100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py @@ -623,11 +623,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = VertexRagServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + VertexRagServiceClient._read_environment_variables() + ) self._client_cert_source = VertexRagServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py index 39bfc64adc..0c74270eed 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py @@ -673,11 +673,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = VizierServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + VizierServiceClient._read_environment_variables() + ) self._client_cert_source = VizierServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py index 43c4be18b9..e3fccd6884 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py @@ -3624,11 +3624,10 @@ def __call__( resp = self._interceptor.post_check_trial_early_stopping_state(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - _, - ) = self._interceptor.post_check_trial_early_stopping_state_with_metadata( - resp, response_metadata + resp, _ = ( + self._interceptor.post_check_trial_early_stopping_state_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest_asyncio.py index 4f0ff724ba..244ef3f954 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest_asyncio.py @@ -1531,11 +1531,10 @@ async def __call__( _BaseVizierServiceRestTransport._BaseCheckTrialEarlyStoppingState._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_check_trial_early_stopping_state( - request, metadata + request, metadata = ( + await self._interceptor.pre_check_trial_early_stopping_state( + request, metadata + ) ) transcoded_request = _BaseVizierServiceRestTransport._BaseCheckTrialEarlyStoppingState._get_transcoded_request( http_options, request @@ -1606,11 +1605,10 @@ async def __call__( json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_check_trial_early_stopping_state(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - ( - resp, - 
_, - ) = await self._interceptor.post_check_trial_early_stopping_state_with_metadata( - resp, response_metadata + resp, _ = ( + await self._interceptor.post_check_trial_early_stopping_state_with_metadata( + resp, response_metadata + ) ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 8cb80aa84d..89e92f4ea0 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -1352,6 +1352,7 @@ EvaluateDatasetRun, EvaluationConfig, PartnerModelTuningSpec, + PreTunedModel, SupervisedHyperParameters, SupervisedTuningDatasetDistribution, SupervisedTuningDataStats, @@ -2542,6 +2543,7 @@ "EvaluateDatasetRun", "EvaluationConfig", "PartnerModelTuningSpec", + "PreTunedModel", "SupervisedHyperParameters", "SupervisedTuningDatasetDistribution", "SupervisedTuningDataStats", diff --git a/google/cloud/aiplatform_v1beta1/types/accelerator_type.py b/google/cloud/aiplatform_v1beta1/types/accelerator_type.py index ee6a121999..62e6dec05d 100644 --- a/google/cloud/aiplatform_v1beta1/types/accelerator_type.py +++ b/google/cloud/aiplatform_v1beta1/types/accelerator_type.py @@ -29,7 +29,7 @@ class AcceleratorType(proto.Enum): - r"""LINT: LEGACY_NAMES Represents a hardware accelerator type. + r"""Represents a hardware accelerator type. Values: ACCELERATOR_TYPE_UNSPECIFIED (0): @@ -63,6 +63,8 @@ class AcceleratorType(proto.Enum): Nvidia B200 GPU. NVIDIA_GB200 (17): Nvidia GB200 GPU. + NVIDIA_RTX_PRO_6000 (18): + Nvidia RTX Pro 6000 GPU. TPU_V2 (6): TPU v2. TPU_V3 (7): @@ -87,6 +89,7 @@ class AcceleratorType(proto.Enum): NVIDIA_H200_141GB = 15 NVIDIA_B200 = 16 NVIDIA_GB200 = 17 + NVIDIA_RTX_PRO_6000 = 18 TPU_V2 = 6 TPU_V3 = 7 TPU_V4_POD = 10 diff --git a/google/cloud/aiplatform_v1beta1/types/annotation.py b/google/cloud/aiplatform_v1beta1/types/annotation.py index 0064052593..aed28c7666 100644 --- a/google/cloud/aiplatform_v1beta1/types/annotation.py +++ b/google/cloud/aiplatform_v1beta1/types/annotation.py @@ -80,15 +80,14 @@ class Annotation(proto.Message): "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Annotation: - - "aiplatform.googleapis.com/annotation_set_name": - optional, name of the UI's annotation set this Annotation - belongs to. If not set, the Annotation is not visible in - the UI. + - "aiplatform.googleapis.com/annotation_set_name": optional, + name of the UI's annotation set this Annotation belongs + to. If not set, the Annotation is not visible in the UI. - - "aiplatform.googleapis.com/payload_schema": output only, - its value is the - [payload_schema's][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri] - title. + - "aiplatform.googleapis.com/payload_schema": output only, + its value is the + [payload_schema's][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri] + title. 
""" name: str = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index 8b25f30ae3..d7b16541c6 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -22,9 +22,7 @@ from google.cloud.aiplatform_v1beta1.types import ( completion_stats as gca_completion_stats, ) -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import job_state @@ -32,9 +30,7 @@ from google.cloud.aiplatform_v1beta1.types import ( manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters, ) -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import model_monitoring from google.cloud.aiplatform_v1beta1.types import ( unmanaged_container_model as gca_unmanaged_container_model, @@ -150,18 +146,18 @@ class BatchPredictionJob(proto.Message): [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config] object: - - ``bigquery``: output includes a column named - ``explanation``. The value is a struct that conforms to - the - [Explanation][google.cloud.aiplatform.v1beta1.Explanation] - object. - - ``jsonl``: The JSON objects on each line include an - additional entry keyed ``explanation``. The value of the - entry is a JSON object that conforms to the - [Explanation][google.cloud.aiplatform.v1beta1.Explanation] - object. - - ``csv``: Generating explanations for CSV format is not - supported. + - ``bigquery``: output includes a column named + ``explanation``. The value is a struct that conforms to + the + [Explanation][google.cloud.aiplatform.v1beta1.Explanation] + object. + - ``jsonl``: The JSON objects on each line include an + additional entry keyed ``explanation``. The value of the + entry is a JSON object that conforms to the + [Explanation][google.cloud.aiplatform.v1beta1.Explanation] + object. + - ``csv``: Generating explanations for CSV format is not + supported. If this field is set to true, either the [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] @@ -331,49 +327,48 @@ class InstanceConfig(proto.Message): Supported values are: - - ``object``: Each input is converted to JSON object - format. - - - For ``bigquery``, each row is converted to an object. - - For ``jsonl``, each line of the JSONL input must be an - object. - - Does not apply to ``csv``, ``file-list``, - ``tf-record``, or ``tf-record-gzip``. - - - ``array``: Each input is converted to JSON array format. - - - For ``bigquery``, each row is converted to an array. - The order of columns is determined by the BigQuery - column order, unless - [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] - is populated. - [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] - must be populated for specifying field orders. 
- - For ``jsonl``, if each line of the JSONL input is an - object, - [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] - must be populated for specifying field orders. - - Does not apply to ``csv``, ``file-list``, - ``tf-record``, or ``tf-record-gzip``. + - ``object``: Each input is converted to JSON object format. + + - For ``bigquery``, each row is converted to an object. + - For ``jsonl``, each line of the JSONL input must be an + object. + - Does not apply to ``csv``, ``file-list``, ``tf-record``, + or ``tf-record-gzip``. + + - ``array``: Each input is converted to JSON array format. + + - For ``bigquery``, each row is converted to an array. The + order of columns is determined by the BigQuery column + order, unless + [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] + is populated. + [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] + must be populated for specifying field orders. + - For ``jsonl``, if each line of the JSONL input is an + object, + [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] + must be populated for specifying field orders. + - Does not apply to ``csv``, ``file-list``, ``tf-record``, + or ``tf-record-gzip``. If not specified, Vertex AI converts the batch prediction input as follows: - - For ``bigquery`` and ``csv``, the behavior is the same as - ``array``. The order of columns is the same as defined in - the file or table, unless - [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] - is populated. - - For ``jsonl``, the prediction instance format is - determined by each line of the input. - - For ``tf-record``/``tf-record-gzip``, each record will be - converted to an object in the format of - ``{"b64": <value>}``, where ``<value>`` is the - Base64-encoded string of the content of the record. - - For ``file-list``, each file in the list will be - converted to an object in the format of - ``{"b64": <value>}``, where ``<value>`` is the - Base64-encoded string of the content of the file. + - For ``bigquery`` and ``csv``, the behavior is the same as + ``array``. The order of columns is the same as defined in + the file or table, unless + [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] + is populated. + - For ``jsonl``, the prediction instance format is + determined by each line of the input. + - For ``tf-record``/``tf-record-gzip``, each record will be + converted to an object in the format of + ``{"b64": <value>}``, where ``<value>`` is the + Base64-encoded string of the content of the record. + - For ``file-list``, each file in the list will be converted + to an object in the format of ``{"b64": <value>}``, where + ``<value>`` is the Base64-encoded string of the content of + the file. key_field (str): The name of the field that is considered as a key. @@ -386,11 +381,11 @@ value of the key field, in a field named ``key`` in the output: - - For ``jsonl`` output format, the output will have a - ``key`` field instead of the ``instance`` field. - - For ``csv``/``bigquery`` output format, the output will - have have a ``key`` column instead of the instance - feature columns. + - For ``jsonl`` output format, the output will have a + ``key`` field instead of the ``instance`` field. 
+ - For ``csv``/``bigquery`` output format, the output will + have a ``key`` column instead of the instance feature + columns. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord. diff --git a/google/cloud/aiplatform_v1beta1/types/cached_content.py b/google/cloud/aiplatform_v1beta1/types/cached_content.py index 69de9502d6..a202168461 100644 --- a/google/cloud/aiplatform_v1beta1/types/cached_content.py +++ b/google/cloud/aiplatform_v1beta1/types/cached_content.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import content -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import tool from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/content.py b/google/cloud/aiplatform_v1beta1/types/content.py index f0c8974f2d..79a85856f1 100644 --- a/google/cloud/aiplatform_v1beta1/types/content.py +++ b/google/cloud/aiplatform_v1beta1/types/content.py @@ -452,11 +452,11 @@ class GenerationConfig(proto.Message): Optional. Output response mimetype of the generated candidate text. Supported mimetype: - - ``text/plain``: (default) Text output. - - ``application/json``: JSON response in the candidates. - The model needs to be prompted to output the appropriate - response type, otherwise the behavior is undefined. This - is a preview feature. + - ``text/plain``: (default) Text output. + - ``application/json``: JSON response in the candidates. The + model needs to be prompted to output the appropriate + response type, otherwise the behavior is undefined. This + is a preview feature. response_schema (google.cloud.aiplatform_v1beta1.types.Schema): Optional. The ``Schema`` object allows the definition of input and output data types. These types can be objects, but @@ -480,26 +480,26 @@ supported. Specifically, only the following properties are supported: - - ``$id`` - - ``$defs`` - - ``$ref`` - - ``$anchor`` - - ``type`` - - ``format`` - - ``title`` - - ``description`` - - ``enum`` (for strings and numbers) - - ``items`` - - ``prefixItems`` - - ``minItems`` - - ``maxItems`` - - ``minimum`` - - ``maximum`` - - ``anyOf`` - - ``oneOf`` (interpreted the same as ``anyOf``) - - ``properties`` - - ``additionalProperties`` - - ``required`` + - ``$id`` + - ``$defs`` + - ``$ref`` + - ``$anchor`` + - ``type`` + - ``format`` + - ``title`` + - ``description`` + - ``enum`` (for strings and numbers) + - ``items`` + - ``prefixItems`` + - ``minItems`` + - ``maxItems`` + - ``minimum`` + - ``maximum`` + - ``anyOf`` + - ``oneOf`` (interpreted the same as ``anyOf``) + - ``properties`` + - ``additionalProperties`` + - ``required`` The non-standard ``propertyOrdering`` property may also be set. @@ -1528,8 +1528,55 @@ class Maps(proto.Message): Can be used to look up the Place. This field is a member of `oneof`_ ``_place_id``. + place_answer_sources (google.cloud.aiplatform_v1beta1.types.GroundingChunk.Maps.PlaceAnswerSources): + Sources used to generate the place answer. + This includes review snippets and photos that + were used to generate the answer, as well as + uris to flag content. 
""" + class PlaceAnswerSources(proto.Message): + r""" + + Attributes: + review_snippets (MutableSequence[google.cloud.aiplatform_v1beta1.types.GroundingChunk.Maps.PlaceAnswerSources.ReviewSnippet]): + Snippets of reviews that are used to generate + the answer. + """ + + class ReviewSnippet(proto.Message): + r"""Encapsulates a review snippet. + + Attributes: + review_id (str): + Id of the review referencing the place. + google_maps_uri (str): + A link to show the review on Google Maps. + title (str): + Title of the review. + """ + + review_id: str = proto.Field( + proto.STRING, + number=1, + ) + google_maps_uri: str = proto.Field( + proto.STRING, + number=2, + ) + title: str = proto.Field( + proto.STRING, + number=3, + ) + + review_snippets: MutableSequence[ + "GroundingChunk.Maps.PlaceAnswerSources.ReviewSnippet" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="GroundingChunk.Maps.PlaceAnswerSources.ReviewSnippet", + ) + uri: str = proto.Field( proto.STRING, number=1, @@ -1550,6 +1597,11 @@ class Maps(proto.Message): number=4, optional=True, ) + place_answer_sources: "GroundingChunk.Maps.PlaceAnswerSources" = proto.Field( + proto.MESSAGE, + number=5, + message="GroundingChunk.Maps.PlaceAnswerSources", + ) web: Web = proto.Field( proto.MESSAGE, @@ -1644,8 +1696,37 @@ class GroundingMetadata(proto.Message): Google Maps grounding. This field is a member of `oneof`_ ``_google_maps_widget_context_token``. + source_flagging_uris (MutableSequence[google.cloud.aiplatform_v1beta1.types.GroundingMetadata.SourceFlaggingUri]): + List of source flagging uris. This is + currently populated only for Google Maps + grounding. """ + class SourceFlaggingUri(proto.Message): + r"""Source content flagging uri for a place or review. This is + currently populated only for Google Maps grounding. + + Attributes: + source_id (str): + Id of the place or review. + flag_content_uri (str): + A link where users can flag a problem with + the source (place or review). (-- The link is + generated by Google and it does not contain + information from the user query. It may contain + information of the content it is flagging, which + can be used to identify places. 
--) + """ + + source_id: str = proto.Field( + proto.STRING, + number=1, + ) + flag_content_uri: str = proto.Field( + proto.STRING, + number=2, + ) + web_search_queries: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, @@ -1681,6 +1762,11 @@ class GroundingMetadata(proto.Message): number=8, optional=True, ) + source_flagging_uris: MutableSequence[SourceFlaggingUri] = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=SourceFlaggingUri, + ) class SearchEntryPoint(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index 1bfb9d94d9..27d8ccbb7c 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import env_var from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import job_state @@ -253,20 +251,20 @@ class CustomJobSpec(proto.Message): For CustomJob: - - AIP_MODEL_DIR = ``/model/`` - - AIP_CHECKPOINT_DIR = - ``/checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``/logs/`` + - AIP_MODEL_DIR = ``/model/`` + - AIP_CHECKPOINT_DIR = + ``/checkpoints/`` + - AIP_TENSORBOARD_LOG_DIR = + ``/logs/`` For CustomJob backing a Trial of HyperparameterTuningJob: - - AIP_MODEL_DIR = - ``//model/`` - - AIP_CHECKPOINT_DIR = - ``//checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``//logs/`` + - AIP_MODEL_DIR = + ``//model/`` + - AIP_CHECKPOINT_DIR = + ``//checkpoints/`` + - AIP_TENSORBOARD_LOG_DIR = + ``//logs/`` protected_artifact_location_id (str): The ID of the location to store protected artifacts. e.g. us-central1. Populate only when diff --git a/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py b/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py index 354bfce02a..f2a94e5797 100644 --- a/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py +++ b/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import job_state from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore @@ -121,10 +119,10 @@ class DataLabelingJob(proto.Message): "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each DataLabelingJob: - - "aiplatform.googleapis.com/schema": output only, its - value is the - [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s - title. + - "aiplatform.googleapis.com/schema": output only, its value + is the + [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s + title. specialist_pools (MutableSequence[str]): The SpecialistPools' resource names associated with this job. 
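
Usage sketch (not part of this patch): the Maps grounding fields added above (``PlaceAnswerSources``, ``ReviewSnippet``, ``SourceFlaggingUri``) can be read off a candidate's grounding metadata. This is a minimal sketch, assuming a ``Candidate`` taken from a v1beta1 ``GenerateContentResponse`` and assuming the chunks are exposed via ``GroundingMetadata.grounding_chunks``; the helper name is ours.

from google.cloud.aiplatform_v1beta1 import types


def summarize_maps_grounding(candidate: types.Candidate) -> None:
    """Print the Maps review sources and flagging links for a candidate."""
    metadata = candidate.grounding_metadata
    for chunk in metadata.grounding_chunks:
        # For non-Maps chunks, ``maps`` is an empty submessage and
        # ``review_snippets`` is empty, so no presence check is needed.
        sources = chunk.maps.place_answer_sources
        for snippet in sources.review_snippets:
            print(f"review {snippet.review_id}: {snippet.title}")
            print(f"  view on Maps: {snippet.google_maps_uri}")
    # One flagging link per place or review used as a source.
    for source in metadata.source_flagging_uris:
        print(f"flag {source.source_id}: {source.flag_content_uri}")
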
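Similarly, the ``AIP_*`` directory variables documented for ``CustomJobSpec`` above are consumed by user code inside the training container. A minimal sketch of such training code, assuming it runs under a CustomJob with an output directory configured; the fallback paths are placeholders for local dry runs.

import os

# Vertex AI injects these variables into CustomJob containers per the
# conventions documented above; the defaults only apply locally.
model_dir = os.environ.get("AIP_MODEL_DIR", "/tmp/model")
checkpoint_dir = os.environ.get("AIP_CHECKPOINT_DIR", "/tmp/checkpoints")
log_dir = os.environ.get("AIP_TENSORBOARD_LOG_DIR", "/tmp/logs")

print(f"model -> {model_dir}, checkpoints -> {checkpoint_dir}, logs -> {log_dir}")
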
diff --git a/google/cloud/aiplatform_v1beta1/types/dataset.py b/google/cloud/aiplatform_v1beta1/types/dataset.py index 98a78082e8..0abfb5459b 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import saved_query from google.protobuf import struct_pb2 # type: ignore @@ -90,10 +88,10 @@ class Dataset(proto.Message): "aiplatform.googleapis.com/" and are immutable. Following system labels exist for each Dataset: - - "aiplatform.googleapis.com/dataset_metadata_schema": - output only, its value is the - [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] - title. + - "aiplatform.googleapis.com/dataset_metadata_schema": + output only, its value is the + [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] + title. saved_queries (MutableSequence[google.cloud.aiplatform_v1beta1.types.SavedQuery]): All SavedQueries belong to the Dataset will be returned in List/Get Dataset response. The annotation_specs field will diff --git a/google/cloud/aiplatform_v1beta1/types/dataset_service.py b/google/cloud/aiplatform_v1beta1/types/dataset_service.py index 0ab8823d95..1289459102 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset_service.py @@ -23,13 +23,9 @@ from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import data_item as gca_data_item from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1beta1.types import dataset_version as gca_dataset_version from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import ( - saved_query as gca_saved_query, -) +from google.cloud.aiplatform_v1beta1.types import saved_query as gca_saved_query from google.cloud.aiplatform_v1beta1.types import tool from google.protobuf import field_mask_pb2 # type: ignore @@ -159,9 +155,9 @@ class UpdateDatasetRequest(proto.Message): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` - - ``description`` - - ``labels`` + - ``display_name`` + - ``description`` + - ``labels`` """ dataset: gca_dataset.Dataset = proto.Field( @@ -190,7 +186,7 @@ class UpdateDatasetVersionRequest(proto.Message): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` + - ``display_name`` """ dataset_version: gca_dataset_version.DatasetVersion = proto.Field( @@ -217,19 +213,19 @@ class ListDatasetsRequest(proto.Message): An expression for filtering the results of the request. For field names both snake_case and camelCase are supported. 
- - ``display_name``: supports = and != - - ``metadata_schema_uri``: supports = and != - - ``labels`` supports general map functions that is: + - ``display_name``: supports = and != + - ``metadata_schema_uri``: supports = and != + - ``labels`` supports general map functions that is: - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. Some examples: - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` page_size (int): The standard list page size. page_token (str): @@ -241,9 +237,9 @@ class ListDatasetsRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` """ parent: str = proto.Field( @@ -397,7 +393,7 @@ class ExportDataResponse(proto.Message): All of the files that are exported in this export operation. For custom code training export, only three (training, validation and test) Cloud Storage paths in wildcard format - are populated (for example, gs://.../training-*). + are populated (for example, gs://.../training-\*). """ exported_files: MutableSequence[str] = proto.RepeatedField( @@ -744,30 +740,30 @@ class SearchDataItemsRequest(proto.Message): An expression for filtering the DataItem that will be returned. - - ``data_item_id`` - for = or !=. - - ``labeled`` - for = or !=. - - ``has_annotation(ANNOTATION_SPEC_ID)`` - true only for - DataItem that have at least one annotation with - annotation_spec_id = ``ANNOTATION_SPEC_ID`` in the - context of SavedQuery or DataLabelingJob. + - ``data_item_id`` - for = or !=. + - ``labeled`` - for = or !=. + - ``has_annotation(ANNOTATION_SPEC_ID)`` - true only for + DataItem that have at least one annotation with + annotation_spec_id = ``ANNOTATION_SPEC_ID`` in the context + of SavedQuery or DataLabelingJob. For example: - - ``data_item=1`` - - ``has_annotation(5)`` + - ``data_item=1`` + - ``has_annotation(5)`` annotations_filter (str): An expression for filtering the Annotations that will be returned per DataItem. - - ``annotation_spec_id`` - for = or !=. + - ``annotation_spec_id`` - for = or !=. annotation_filters (MutableSequence[str]): An expression that specifies what Annotations will be returned per DataItem. Annotations satisfied either of the conditions will be returned. - - ``annotation_spec_id`` - for = or !=. Must specify - ``saved_query_id=`` - saved query id that annotations - should belong to. + - ``annotation_spec_id`` - for = or !=. Must specify + ``saved_query_id=`` - saved query id that annotations + should belong to. 
field_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields of [DataItemView][google.cloud.aiplatform.v1beta1.DataItemView] diff --git a/google/cloud/aiplatform_v1beta1/types/deployment_resource_pool.py b/google/cloud/aiplatform_v1beta1/types/deployment_resource_pool.py index e8b78d9c35..3e9ef44148 100644 --- a/google/cloud/aiplatform_v1beta1/types/deployment_resource_pool.py +++ b/google/cloud/aiplatform_v1beta1/types/deployment_resource_pool.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import machine_resources from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint.py b/google/cloud/aiplatform_v1beta1/types/endpoint.py index 1c6f507307..a977f7afc5 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import machine_resources @@ -179,6 +177,9 @@ class Endpoint(proto.Message): native RAG integration can be configured. Currently, only Model Garden models are supported. + private_model_server_enabled (bool): + If true, the model server will be isolated + from the external internet. """ name: str = proto.Field( @@ -279,6 +280,10 @@ class Endpoint(proto.Message): number=29, message="GenAiAdvancedFeaturesConfig", ) + private_model_server_enabled: bool = proto.Field( + proto.BOOL, + number=30, + ) class DeployedModel(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py index 9b2cf644d4..9805b46fcc 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -155,26 +155,26 @@ class ListEndpointsRequest(proto.Message): request. For field names both snake_case and camelCase are supported. - - ``endpoint`` supports ``=`` and ``!=``. ``endpoint`` - represents the Endpoint ID, i.e. the last segment of the - Endpoint's [resource - name][google.cloud.aiplatform.v1beta1.Endpoint.name]. - - ``display_name`` supports ``=`` and ``!=``. - - ``labels`` supports general map functions that is: + - ``endpoint`` supports ``=`` and ``!=``. ``endpoint`` + represents the Endpoint ID, i.e. the last segment of the + Endpoint's [resource + name][google.cloud.aiplatform.v1beta1.Endpoint.name]. + - ``display_name`` supports ``=`` and ``!=``. + - ``labels`` supports general map functions that is: - - ``labels.key=value`` - key:value equality - - ``labels.key:*`` or ``labels:key`` - key existence - - A key including a space must be quoted. - ``labels."a key"``. + - ``labels.key=value`` - key:value equality + - ``labels.key:*`` or ``labels:key`` - key existence + - A key including a space must be quoted. + ``labels."a key"``. - - ``base_model_name`` only supports ``=``. + - ``base_model_name`` only supports ``=``. 
Some examples: - - ``endpoint=1`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - - ``baseModelName="text-bison"`` + - ``endpoint=1`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + - ``baseModelName="text-bison"`` page_size (int): Optional. The standard list page size. page_token (str): @@ -537,19 +537,19 @@ class MutateDeployedModelRequest(proto.Message): Required. The DeployedModel to be mutated within the Endpoint. Only the following fields can be mutated: - - ``min_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] - - ``max_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] - - ``required_replica_count`` in - [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] - - [autoscaling_metric_specs][google.cloud.aiplatform.v1beta1.DedicatedResources.autoscaling_metric_specs] - - ``disable_container_logging`` (v1 only) - - ``enable_container_logging`` (v1beta1 only) + - ``min_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] + - ``max_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1beta1.AutomaticResources] + - ``required_replica_count`` in + [DedicatedResources][google.cloud.aiplatform.v1beta1.DedicatedResources] + - [autoscaling_metric_specs][google.cloud.aiplatform.v1beta1.DedicatedResources.autoscaling_metric_specs] + - ``disable_container_logging`` (v1 only) + - ``enable_container_logging`` (v1beta1 only) update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. See [google.protobuf.FieldMask][google.protobuf.FieldMask]. diff --git a/google/cloud/aiplatform_v1beta1/types/entity_type.py b/google/cloud/aiplatform_v1beta1/types/entity_type.py index 769f784369..da882510f6 100644 --- a/google/cloud/aiplatform_v1beta1/types/entity_type.py +++ b/google/cloud/aiplatform_v1beta1/types/entity_type.py @@ -44,7 +44,7 @@ class EntityType(proto.Message): The last part entity_type is assigned by the client. The entity_type can be up to 64 characters long and can consist - only of ASCII Latin letters A-Z and a-z and underscore(_), + only of ASCII Latin letters A-Z and a-z and underscore(\_), and ASCII digits 0-9 starting with a letter. The value will be unique given a featurestore. 
description (str): diff --git a/google/cloud/aiplatform_v1beta1/types/evaluated_annotation.py b/google/cloud/aiplatform_v1beta1/types/evaluated_annotation.py index 020f58e1f2..eb19d8fd70 100644 --- a/google/cloud/aiplatform_v1beta1/types/evaluated_annotation.py +++ b/google/cloud/aiplatform_v1beta1/types/evaluated_annotation.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - explanation as gca_explanation, -) +from google.cloud.aiplatform_v1beta1.types import explanation as gca_explanation from google.protobuf import struct_pb2 # type: ignore @@ -183,8 +181,8 @@ class EvaluatedAnnotationExplanation(proto.Message): For AutoML Image Classification models, possible values are: - - ``image-integrated-gradients`` - - ``image-xrai`` + - ``image-integrated-gradients`` + - ``image-xrai`` explanation (google.cloud.aiplatform_v1beta1.types.Explanation): Explanation attribution response details. """ diff --git a/google/cloud/aiplatform_v1beta1/types/evaluation_service.py b/google/cloud/aiplatform_v1beta1/types/evaluation_service.py index a07bfcaaac..6f7cf4f960 100644 --- a/google/cloud/aiplatform_v1beta1/types/evaluation_service.py +++ b/google/cloud/aiplatform_v1beta1/types/evaluation_service.py @@ -3440,11 +3440,10 @@ class PointwiseMetricSpec(proto.Message): explanation. When this config is set, the default output is replaced with either: - - The raw output string. - - A parsed output based on a user-defined schema. If a - custom format is chosen, the ``score`` and - ``explanation`` fields in the corresponding metric result - will be empty. + - The raw output string. + - A parsed output based on a user-defined schema. If a + custom format is chosen, the ``score`` and ``explanation`` + fields in the corresponding metric result will be empty. """ metric_prompt_template: str = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/example_store.py b/google/cloud/aiplatform_v1beta1/types/example_store.py index 79e11ff7ee..194a597d0b 100644 --- a/google/cloud/aiplatform_v1beta1/types/example_store.py +++ b/google/cloud/aiplatform_v1beta1/types/example_store.py @@ -96,10 +96,10 @@ class ExampleStoreConfig(proto.Message): Required. The embedding model to be used for vector embedding. Immutable. Supported models: - - "textembedding-gecko@003" - - "text-embedding-004" - - "text-embedding-005" - - "text-multilingual-embedding-002". + - "textembedding-gecko@003" + - "text-embedding-004" + - "text-embedding-005" + - "text-multilingual-embedding-002". 
""" vertex_embedding_model: str = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/example_store_service.py b/google/cloud/aiplatform_v1beta1/types/example_store_service.py index c2312fae4b..5f7ae0a351 100644 --- a/google/cloud/aiplatform_v1beta1/types/example_store_service.py +++ b/google/cloud/aiplatform_v1beta1/types/example_store_service.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import example as gca_example -from google.cloud.aiplatform_v1beta1.types import ( - example_store as gca_example_store, -) +from google.cloud.aiplatform_v1beta1.types import example_store as gca_example_store from google.cloud.aiplatform_v1beta1.types import operation from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/explanation.py b/google/cloud/aiplatform_v1beta1/types/explanation.py index f2214ef8a0..2e55cdadf9 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation.py @@ -194,20 +194,19 @@ class Attribution(proto.Message): The format of the value is determined by the feature's input format: - - If the feature is a scalar value, the attribution value - is a [floating - number][google.protobuf.Value.number_value]. + - If the feature is a scalar value, the attribution value is + a [floating number][google.protobuf.Value.number_value]. - - If the feature is an array of scalar values, the - attribution value is an - [array][google.protobuf.Value.list_value]. + - If the feature is an array of scalar values, the + attribution value is an + [array][google.protobuf.Value.list_value]. - - If the feature is a struct, the attribution value is a - [struct][google.protobuf.Value.struct_value]. The keys in - the attribution value struct are the same as the keys in - the feature struct. The formats of the values in the - attribution struct are determined by the formats of the - values in the feature struct. + - If the feature is a struct, the attribution value is a + [struct][google.protobuf.Value.struct_value]. The keys in + the attribution value struct are the same as the keys in + the feature struct. The formats of the values in the + attribution struct are determined by the formats of the + values in the feature struct. The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri] @@ -243,21 +242,21 @@ class Attribution(proto.Message): caused by approximation used in the explanation method. Lower value means more precise attributions. - - For Sampled Shapley - [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution], - increasing - [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] - might reduce the error. - - For Integrated Gradients - [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution], - increasing - [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count] - might reduce the error. - - For [XRAI - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], - increasing - [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count] - might reduce the error. 
+ - For Sampled Shapley + [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution], + increasing + [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] + might reduce the error. + - For Integrated Gradients + [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution], + increasing + [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count] + might reduce the error. + - For [XRAI + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], + increasing + [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count] + might reduce the error. See `this introduction `__ diff --git a/google/cloud/aiplatform_v1beta1/types/extension.py b/google/cloud/aiplatform_v1beta1/types/extension.py index 802b86b140..9ba2e4af8b 100644 --- a/google/cloud/aiplatform_v1beta1/types/extension.py +++ b/google/cloud/aiplatform_v1beta1/types/extension.py @@ -351,11 +351,11 @@ class ApiKeyConfig(proto.Message): resource storing the API key. Format: ``projects/{project}/secrets/{secrete}/versions/{version}`` - - If specified, the ``secretmanager.versions.access`` - permission should be granted to Vertex AI Extension - Service Agent - (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) - on the specified resource. + - If specified, the ``secretmanager.versions.access`` + permission should be granted to Vertex AI Extension + Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) + on the specified resource. http_element_location (google.cloud.aiplatform_v1beta1.types.HttpElementLocation): Required. The location of the API key. """ @@ -383,11 +383,11 @@ class HttpBasicAuthConfig(proto.Message): resource storing the base64 encoded credentials. Format: ``projects/{project}/secrets/{secrete}/versions/{version}`` - - If specified, the ``secretmanager.versions.access`` - permission should be granted to Vertex AI Extension - Service Agent - (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) - on the specified resource. + - If specified, the ``secretmanager.versions.access`` + permission should be granted to Vertex AI Extension + Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) + on the specified resource. """ credential_secret: str = proto.Field( @@ -403,14 +403,14 @@ class GoogleServiceAccountConfig(proto.Message): Optional. The service account that the extension execution service runs as. - - If the service account is specified, the - ``iam.serviceAccounts.getAccessToken`` permission should - be granted to Vertex AI Extension Service Agent - (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) - on the specified service account. + - If the service account is specified, the + ``iam.serviceAccounts.getAccessToken`` permission should + be granted to Vertex AI Extension Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) + on the specified service account. - - If not specified, the Vertex AI Extension Service Agent - will be used to execute the Extension. + - If not specified, the Vertex AI Extension Service Agent + will be used to execute the Extension. """ service_account: str = proto.Field( @@ -439,11 +439,11 @@ class OauthConfig(proto.Message): The service account used to generate access tokens for executing the Extension. 
- - If the service account is specified, the - ``iam.serviceAccounts.getAccessToken`` permission should - be granted to Vertex AI Extension Service Agent - (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) - on the provided service account. + - If the service account is specified, the + ``iam.serviceAccounts.getAccessToken`` permission should + be granted to Vertex AI Extension Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) + on the provided service account. This field is a member of `oneof`_ ``oauth_config``. """ @@ -483,13 +483,13 @@ class OidcConfig(proto.Message): Provider (accounts.google.com) for extension endpoint (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). - - The audience for the token will be set to the URL in the - server url defined in the OpenApi spec. + - The audience for the token will be set to the URL in the + server url defined in the OpenApi spec. - - If the service account is provided, the service account - should grant ``iam.serviceAccounts.getOpenIdToken`` - permission to Vertex AI Extension Service Agent - (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + - If the service account is provided, the service account + should grant ``iam.serviceAccounts.getOpenIdToken`` + permission to Vertex AI Extension Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). This field is a member of `oneof`_ ``oidc_config``. """ @@ -661,11 +661,11 @@ class ExtensionPrivateServiceConnectConfig(proto.Message): registered. Format: ``projects/{project_id}/locations/{location_id}/namespaces/{namespace_id}/services/{service_id}`` - - The Vertex AI Extension Service Agent - (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) - should be granted ``servicedirectory.viewer`` and - ``servicedirectory.pscAuthorizedService`` roles on the - resource. + - The Vertex AI Extension Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) + should be granted ``servicedirectory.viewer`` and + ``servicedirectory.pscAuthorizedService`` roles on the + resource. """ service_directory: str = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/extension_registry_service.py b/google/cloud/aiplatform_v1beta1/types/extension_registry_service.py index 1b4cdd99c5..dff79b20d6 100644 --- a/google/cloud/aiplatform_v1beta1/types/extension_registry_service.py +++ b/google/cloud/aiplatform_v1beta1/types/extension_registry_service.py @@ -151,9 +151,9 @@ class ListExtensionsRequest(proto.Message): sorted in ascending order. Use "desc" after a field name for descending. Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` Example: ``display_name, create_time desc``. """ diff --git a/google/cloud/aiplatform_v1beta1/types/feature.py b/google/cloud/aiplatform_v1beta1/types/feature.py index 8ac5f7afd3..d821b3b43b 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature.py +++ b/google/cloud/aiplatform_v1beta1/types/feature.py @@ -45,7 +45,7 @@ class Feature(proto.Message): The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of - ASCII Latin letters A-Z and a-z, underscore(_), and ASCII + ASCII Latin letters A-Z and a-z, underscore(\_), and ASCII digits 0-9 starting with a letter. 
The value will be unique given an entity type. description (str): diff --git a/google/cloud/aiplatform_v1beta1/types/feature_online_store.py b/google/cloud/aiplatform_v1beta1/types/feature_online_store.py index e08bd6e896..85a8e7c160 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_online_store.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_online_store.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import service_networking from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/feature_online_store_admin_service.py b/google/cloud/aiplatform_v1beta1/types/feature_online_store_admin_service.py index 8084d83f8f..4ddc68967b 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_online_store_admin_service.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_online_store_admin_service.py @@ -22,9 +22,7 @@ from google.cloud.aiplatform_v1beta1.types import ( feature_online_store as gca_feature_online_store, ) -from google.cloud.aiplatform_v1beta1.types import ( - feature_view as gca_feature_view, -) +from google.cloud.aiplatform_v1beta1.types import feature_view as gca_feature_view from google.cloud.aiplatform_v1beta1.types import ( feature_view_sync as gca_feature_view_sync, ) @@ -126,20 +124,20 @@ class ListFeatureOnlineStoresRequest(proto.Message): Lists the FeatureOnlineStores that match the filter expression. The following fields are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality and key presence. Examples: - - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` - FeatureOnlineStores created or updated after 2020-01-01. - - ``labels.env = "prod"`` FeatureOnlineStores with label - "env" set to "prod". + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + FeatureOnlineStores created or updated after 2020-01-01. + - ``labels.env = "prod"`` FeatureOnlineStores with label + "env" set to "prod". page_size (int): The maximum number of FeatureOnlineStores to return. The service may return fewer than this @@ -160,8 +158,8 @@ class ListFeatureOnlineStoresRequest(proto.Message): ascending order. Use "desc" after a field name for descending. 
Supported Fields: - - ``create_time`` - - ``update_time`` + - ``create_time`` + - ``update_time`` """ parent: str = proto.Field( @@ -238,11 +236,11 @@ class UpdateFeatureOnlineStoreRequest(proto.Message): Updatable fields: - - ``labels`` - - ``description`` - - ``bigtable`` - - ``bigtable.auto_scaling`` - - ``bigtable.enable_multi_region_replica`` + - ``labels`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` """ feature_online_store: gca_feature_online_store.FeatureOnlineStore = proto.Field( @@ -358,25 +356,25 @@ class ListFeatureViewsRequest(proto.Message): Lists the FeatureViews that match the filter expression. The following filters are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality as well as key + presence. Examples: - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> FeatureViews created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - FeatureViews having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any FeatureView which has a label - with 'env' as the key. + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> FeatureViews created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + FeatureViews having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any FeatureView which has a label + with 'env' as the key. page_size (int): The maximum number of FeatureViews to return. The service may return fewer than this value. If @@ -398,9 +396,9 @@ class ListFeatureViewsRequest(proto.Message): Supported fields: - - ``feature_view_id`` - - ``create_time`` - - ``update_time`` + - ``feature_view_id`` + - ``create_time`` + - ``update_time`` """ parent: str = proto.Field( @@ -475,15 +473,15 @@ class UpdateFeatureViewRequest(proto.Message): Updatable fields: - - ``labels`` - - ``service_agent_type`` - - ``big_query_source`` - - ``big_query_source.uri`` - - ``big_query_source.entity_id_columns`` - - ``feature_registry_source`` - - ``feature_registry_source.feature_groups`` - - ``sync_config`` - - ``sync_config.cron`` + - ``labels`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` """ feature_view: gca_feature_view.FeatureView = proto.Field( @@ -635,15 +633,15 @@ class ListFeatureViewSyncsRequest(proto.Message): Lists the FeatureViewSyncs that match the filter expression. The following filters are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. 
+ - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC 3339 + format. Examples: - - ``create_time > \"2020-01-31T15:30:00.000000Z\"`` --> - FeatureViewSyncs created after - 2020-01-31T15:30:00.000000Z. + - ``create_time > \"2020-01-31T15:30:00.000000Z\"`` --> + FeatureViewSyncs created after + 2020-01-31T15:30:00.000000Z. page_size (int): The maximum number of FeatureViewSyncs to return. The service may return fewer than this @@ -666,7 +664,7 @@ class ListFeatureViewSyncsRequest(proto.Message): Supported fields: - - ``create_time`` + - ``create_time`` """ parent: str = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/feature_registry_service.py b/google/cloud/aiplatform_v1beta1/types/feature_registry_service.py index ed3e957aa7..c19cb202f2 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_registry_service.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_registry_service.py @@ -19,12 +19,8 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - feature_group as gca_feature_group, -) -from google.cloud.aiplatform_v1beta1.types import ( - feature_monitor as gca_feature_monitor, -) +from google.cloud.aiplatform_v1beta1.types import feature_group as gca_feature_group +from google.cloud.aiplatform_v1beta1.types import feature_monitor as gca_feature_monitor from google.cloud.aiplatform_v1beta1.types import ( feature_monitor_job as gca_feature_monitor_job, ) @@ -127,20 +123,20 @@ class ListFeatureGroupsRequest(proto.Message): Lists the FeatureGroups that match the filter expression. The following fields are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality and key presence. Examples: - - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` - FeatureGroups created or updated after 2020-01-01. - - ``labels.env = "prod"`` FeatureGroups with label "env" - set to "prod". + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + FeatureGroups created or updated after 2020-01-01. + - ``labels.env = "prod"`` FeatureGroups with label "env" set + to "prod". page_size (int): The maximum number of FeatureGroups to return. The service may return fewer than this @@ -160,8 +156,8 @@ class ListFeatureGroupsRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported Fields: - - ``create_time`` - - ``update_time`` + - ``create_time`` + - ``update_time`` """ parent: str = proto.Field( @@ -238,10 +234,10 @@ class UpdateFeatureGroupRequest(proto.Message): Updatable fields: - - ``labels`` - - ``description`` - - ``big_query`` - - ``big_query.entity_id_columns`` + - ``labels`` + - ``description`` + - ``big_query`` + - ``big_query.entity_id_columns`` """ feature_group: gca_feature_group.FeatureGroup = proto.Field( @@ -348,20 +344,20 @@ class ListFeatureMonitorsRequest(proto.Message): Optional. Lists the FeatureMonitors that match the filter expression. 
The following fields are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality and key presence. Examples: - - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` - FeatureMonitors created or updated after 2020-01-01. - - ``labels.env = "prod"`` FeatureGroups with label "env" - set to "prod". + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + FeatureMonitors created or updated after 2020-01-01. + - ``labels.env = "prod"`` FeatureGroups with label "env" set + to "prod". page_size (int): Optional. The maximum number of FeatureGroups to return. The service may return fewer than @@ -382,8 +378,8 @@ class ListFeatureMonitorsRequest(proto.Message): sorted in ascending order. Use "desc" after a field name for descending. Supported Fields: - - ``create_time`` - - ``update_time`` + - ``create_time`` + - ``update_time`` """ parent: str = proto.Field( @@ -429,7 +425,7 @@ class UpdateFeatureMonitorRequest(proto.Message): Updatable fields: - - ``labels`` + - ``labels`` """ feature_monitor: gca_feature_monitor.FeatureMonitor = proto.Field( @@ -643,13 +639,13 @@ class ListFeatureMonitorJobsRequest(proto.Message): Optional. Lists the FeatureMonitorJobs that match the filter expression. The following fields are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be Examples: - - ``create_time > "2020-01-01"`` FeatureMonitorJobs created - after 2020-01-01. + - ``create_time > "2020-01-01"`` FeatureMonitorJobs created + after 2020-01-01. page_size (int): Optional. The maximum number of FeatureMonitorJobs to return. The service may @@ -670,7 +666,7 @@ class ListFeatureMonitorJobsRequest(proto.Message): sorted in ascending order. Use "desc" after a field name for descending. Supported Fields: - - ``create_time`` + - ``create_time`` """ parent: str = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/feature_selector.py b/google/cloud/aiplatform_v1beta1/types/feature_selector.py index a507a0f97b..6ca120578d 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_selector.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_selector.py @@ -36,10 +36,10 @@ class IdMatcher(proto.Message): ids (MutableSequence[str]): Required. The following are accepted as ``ids``: - - A single-element list containing only ``*``, which - selects all Features in the target EntityType, or - - A list containing only Feature IDs, which selects only - Features with those IDs in the target EntityType. + - A single-element list containing only ``*``, which selects + all Features in the target EntityType, or + - A list containing only Feature IDs, which selects only + Features with those IDs in the target EntityType. 
""" ids: MutableSequence[str] = proto.RepeatedField( diff --git a/google/cloud/aiplatform_v1beta1/types/feature_view.py b/google/cloud/aiplatform_v1beta1/types/feature_view.py index 48e6ef3dfd..6e2749376f 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_view.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_view.py @@ -541,13 +541,13 @@ class VertexRagSource(proto.Message): materialized on each manual sync trigger. The table/view is expected to have the following columns and types at least: - - ``corpus_id`` (STRING, NULLABLE/REQUIRED) - - ``file_id`` (STRING, NULLABLE/REQUIRED) - - ``chunk_id`` (STRING, NULLABLE/REQUIRED) - - ``chunk_data_type`` (STRING, NULLABLE/REQUIRED) - - ``chunk_data`` (STRING, NULLABLE/REQUIRED) - - ``embeddings`` (FLOAT, REPEATED) - - ``file_original_uri`` (STRING, NULLABLE/REQUIRED) + - ``corpus_id`` (STRING, NULLABLE/REQUIRED) + - ``file_id`` (STRING, NULLABLE/REQUIRED) + - ``chunk_id`` (STRING, NULLABLE/REQUIRED) + - ``chunk_data_type`` (STRING, NULLABLE/REQUIRED) + - ``chunk_data`` (STRING, NULLABLE/REQUIRED) + - ``embeddings`` (FLOAT, REPEATED) + - ``file_original_uri`` (STRING, NULLABLE/REQUIRED) rag_corpus_id (int): Optional. The RAG corpus id corresponding to this FeatureView. diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore.py b/google/cloud/aiplatform_v1beta1/types/featurestore.py index f47c193f62..a89650eec8 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py index 500d79b2dd..f76d8b4c8b 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -19,17 +19,13 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - entity_type as gca_entity_type, -) +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type from google.cloud.aiplatform_v1beta1.types import feature as gca_feature from google.cloud.aiplatform_v1beta1.types import feature_monitor from google.cloud.aiplatform_v1beta1.types import ( feature_selector as gca_feature_selector, ) -from google.cloud.aiplatform_v1beta1.types import ( - featurestore as gca_featurestore, -) +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -152,23 +148,23 @@ class ListFeaturestoresRequest(proto.Message): Lists the featurestores that match the filter expression. The following fields are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``online_serving_config.fixed_node_count``: Supports - ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` - comparisons. - - ``labels``: Supports key-value equality and key presence. 
+ - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``online_serving_config.fixed_node_count``: Supports + ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` + comparisons. + - ``labels``: Supports key-value equality and key presence. Examples: - - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` - Featurestores created or updated after 2020-01-01. - - ``labels.env = "prod"`` Featurestores with label "env" - set to "prod". + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + Featurestores created or updated after 2020-01-01. + - ``labels.env = "prod"`` Featurestores with label "env" set + to "prod". page_size (int): The maximum number of Featurestores to return. The service may return fewer than this @@ -188,9 +184,9 @@ class ListFeaturestoresRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported Fields: - - ``create_time`` - - ``update_time`` - - ``online_serving_config.fixed_node_count`` + - ``create_time`` + - ``update_time`` + - ``online_serving_config.fixed_node_count`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. """ @@ -272,10 +268,10 @@ class UpdateFeaturestoreRequest(proto.Message): Updatable fields: - - ``labels`` - - ``online_serving_config.fixed_node_count`` - - ``online_serving_config.scaling`` - - ``online_storage_ttl_days`` + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.scaling`` + - ``online_storage_ttl_days`` """ featurestore: gca_featurestore.Featurestore = proto.Field( @@ -474,10 +470,10 @@ class ImportFeatureValuesResponse(proto.Message): The number of rows in input source that weren't imported due to either - - Not having any featureValues. - - Having a null entityId. - - Having a null timestamp. - - Not being parsable (applicable for CSV sources). + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). timestamp_outside_retention_rows_count (int): The number rows that weren't ingested due to having feature timestamps outside the retention @@ -948,25 +944,25 @@ class ListEntityTypesRequest(proto.Message): Lists the EntityTypes that match the filter expression. The following filters are supported: - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality as well as key + presence. Examples: - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> EntityTypes created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - EntityTypes having both (active: yes) and (env: prod) - labels. 
- - ``labels.env: *`` --> Any EntityType which has a label - with 'env' as the key. + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + EntityTypes having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any EntityType which has a label + with 'env' as the key. page_size (int): The maximum number of EntityTypes to return. The service may return fewer than this value. If @@ -988,9 +984,9 @@ class ListEntityTypesRequest(proto.Message): Supported fields: - - ``entity_type_id`` - - ``create_time`` - - ``update_time`` + - ``entity_type_id`` + - ``create_time`` + - ``update_time`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. """ @@ -1072,16 +1068,16 @@ class UpdateEntityTypeRequest(proto.Message): Updatable fields: - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` - - ``monitoring_config.snapshot_analysis.staleness_days`` - - ``monitoring_config.import_features_analysis.state`` - - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` - - ``monitoring_config.numerical_threshold_config.value`` - - ``monitoring_config.categorical_threshold_config.value`` - - ``offline_storage_ttl_days`` + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` + - ``monitoring_config.snapshot_analysis.staleness_days`` + - ``monitoring_config.import_features_analysis.state`` + - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` + - ``monitoring_config.numerical_threshold_config.value`` + - ``monitoring_config.categorical_threshold_config.value`` + - ``offline_storage_ttl_days`` """ entity_type: gca_entity_type.EntityType = proto.Field( @@ -1259,26 +1255,24 @@ class ListFeaturesRequest(proto.Message): Lists the Features that match the filter expression. The following filters are supported: - - ``value_type``: Supports = and != comparisons. - - ``create_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``update_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. + - ``value_type``: Supports = and != comparisons. + - ``create_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``update_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``labels``: Supports key-value equality as well as key + presence. Examples: - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> EntityTypes created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label with - 'env' as the key. + - ``value_type = DOUBLE`` --> Features whose type is DOUBLE. + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. 
+ - ``labels.active = yes AND labels.env = prod`` --> Features + having both (active: yes) and (env: prod) labels. + - ``labels.env: *`` --> Any Feature which has a label with + 'env' as the key. page_size (int): The maximum number of Features to return. The service may return fewer than this value. If @@ -1302,11 +1296,10 @@ class ListFeaturesRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported fields: - - ``feature_id`` - - ``value_type`` (Not supported for FeatureRegistry - Feature) - - ``create_time`` - - ``update_time`` + - ``feature_id`` + - ``value_type`` (Not supported for FeatureRegistry Feature) + - ``create_time`` + - ``update_time`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. latest_stats_count (int): @@ -1401,14 +1394,14 @@ class SearchFeaturesRequest(proto.Message): FIELD. The QUERY and the FIELD are converted to a sequence of words (i.e. tokens) for comparison. This is done by: - - Removing leading/trailing whitespace and tokenizing the - search value. Characters that are not one of alphanumeric - ``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are - treated as delimiters for tokens. ``*`` is treated as a - wildcard that matches characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. + - Removing leading/trailing whitespace and tokenizing the + search value. Characters that are not one of alphanumeric + ``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are + treated as delimiters for tokens. ``*`` is treated as a + wildcard that matches characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. A QUERY must be either a singular token or a phrase. A phrase is one or multiple words enclosed in double quotation @@ -1418,47 +1411,45 @@ class SearchFeaturesRequest(proto.Message): Supported FIELDs for field-restricted queries: - - ``feature_id`` - - ``description`` - - ``entity_type_id`` + - ``feature_id`` + - ``description`` + - ``entity_type_id`` Examples: - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (eg. ``foo``, - ``foofeature``, ``barfoo``). - - ``feature_id: foo*feature`` --> Matches a Feature with ID - containing the substring ``foo*feature`` (eg. - ``foobarfeature``). - - ``feature_id: foo AND description: bar`` --> Matches a - Feature with ID containing the substring ``foo`` and - description containing the substring ``bar``. + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature with ID + containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches a + Feature with ID containing the substring ``foo`` and + description containing the substring ``bar``. Besides field queries, the following exact-match filters are supported. The exact-match filters do not support wildcards. Unlike field-restricted queries, exact-match filters are case-sensitive. - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. - - ``labels``: Supports key-value equality as well as key - presence. 
- - ``featurestore_id``: Supports = comparisons. + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as key + presence. + - ``featurestore_id``: Supports = comparisons. Examples: - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar`` - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label with - ``env`` as the key. + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> Features + having both (active: yes) and (env: prod) labels. + - ``labels.env: *`` --> Any Feature which has a label with + ``env`` as the key. page_size (int): The maximum number of Features to return. The service may return fewer than this value. If @@ -1504,11 +1495,11 @@ class SearchFeaturesResponse(proto.Message): Fields returned: - - ``name`` - - ``description`` - - ``labels`` - - ``create_time`` - - ``update_time`` + - ``name`` + - ``description`` + - ``labels`` + - ``create_time`` + - ``update_time`` next_page_token (str): A token, which can be sent as [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.SearchFeaturesRequest.page_token] @@ -1554,12 +1545,12 @@ class UpdateFeatureRequest(proto.Message): Updatable fields: - - ``description`` - - ``labels`` - - ``disable_monitoring`` (Not supported for - FeatureRegistryService Feature) - - ``point_of_contact`` (Not supported for - FeaturestoreService FeatureStore) + - ``description`` + - ``labels`` + - ``disable_monitoring`` (Not supported for + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) """ feature: gca_feature.Feature = proto.Field( @@ -1643,10 +1634,10 @@ class ImportFeatureValuesOperationMetadata(proto.Message): The number of rows in input source that weren't imported due to either - - Not having any featureValues. - - Having a null entityId. - - Having a null timestamp. - - Not being parsable (applicable for CSV sources). + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). 
timestamp_outside_retention_rows_count (int): The number of rows that weren't ingested due to having timestamps outside the retention diff --git a/google/cloud/aiplatform_v1beta1/types/gen_ai_cache_service.py b/google/cloud/aiplatform_v1beta1/types/gen_ai_cache_service.py index e2c669e6d0..293a285343 100644 --- a/google/cloud/aiplatform_v1beta1/types/gen_ai_cache_service.py +++ b/google/cloud/aiplatform_v1beta1/types/gen_ai_cache_service.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - cached_content as gca_cached_content, -) +from google.cloud.aiplatform_v1beta1.types import cached_content as gca_cached_content from google.protobuf import field_mask_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/genai_tuning_service.py b/google/cloud/aiplatform_v1beta1/types/genai_tuning_service.py index f2241d81c8..1adc3daedd 100644 --- a/google/cloud/aiplatform_v1beta1/types/genai_tuning_service.py +++ b/google/cloud/aiplatform_v1beta1/types/genai_tuning_service.py @@ -21,9 +21,7 @@ from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import ( - tuning_job as gca_tuning_job, -) +from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py b/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py index 127126b1b8..50eefe529a 100644 --- a/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py +++ b/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import study from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/index.py b/google/cloud/aiplatform_v1beta1/types/index.py index 07a0a07038..920ea69ec9 100644 --- a/google/cloud/aiplatform_v1beta1/types/index.py +++ b/google/cloud/aiplatform_v1beta1/types/index.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import deployed_index_ref -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py index c59aed3118..c05f2b4063 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import machine_resources from google.cloud.aiplatform_v1beta1.types import service_networking from google.protobuf import timestamp_pb2 # type: ignore
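The hunks above and below repeat one mechanical cleanup: a parenthesized import holding a single aliased module is collapsed onto one line. Both spellings create the same binding; a minimal sketch, reusing a module name that actually appears in these hunks::

    # Wrapped form, as emitted when the alias once pushed the line past
    # the formatter's length limit:
    from google.cloud.aiplatform_v1beta1.types import (
        encryption_spec as gca_encryption_spec,
    )

    # Collapsed form adopted by this change; the same import on one line:
    from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec

Imports whose collapsed form would still overflow the limit, such as ``model_monitoring_job as gca_model_monitoring_job`` further down, keep their parentheses.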
diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py index ae6327b7e1..74f5e99e76 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - index_endpoint as gca_index_endpoint, -) +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1beta1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -119,25 +117,25 @@ class ListIndexEndpointsRequest(proto.Message): request. For field names both snake_case and camelCase are supported. - - ``index_endpoint`` supports = and !=. ``index_endpoint`` - represents the IndexEndpoint ID, ie. the last segment of - the IndexEndpoint's - [resourcename][google.cloud.aiplatform.v1beta1.IndexEndpoint.name]. - - ``display_name`` supports =, != and regex() (uses - `re2 `__ - syntax) - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality - ``labels.key:* or labels:key - key existence A key including a space must be quoted.``\ labels."a - key"`. + - ``index_endpoint`` supports = and !=. ``index_endpoint`` + represents the IndexEndpoint ID, i.e. the last segment of + the IndexEndpoint's + [resourcename][google.cloud.aiplatform.v1beta1.IndexEndpoint.name]. + - ``display_name`` supports =, != and regex() (uses + `re2 `__ + syntax) + - ``labels`` supports general map functions, that is: + ``labels.key=value`` - key:value equality; + ``labels.key:*`` or ``labels:key`` - key existence. + A key including a space must be quoted: ``labels."a key"``. Some examples: - - ``index_endpoint="1"`` - - ``display_name="myDisplayName"`` - - \`regex(display_name, "^A") -> The display name starts - with an A. - - ``labels.myKey="myValue"`` + - ``index_endpoint="1"`` + - ``display_name="myDisplayName"`` + - ``regex(display_name, "^A")`` -> The display name starts + with an A. + - ``labels.myKey="myValue"`` page_size (int): Optional. The standard list page size. page_token (str): diff --git a/google/cloud/aiplatform_v1beta1/types/index_service.py b/google/cloud/aiplatform_v1beta1/types/index_service.py index 9f09c0e3bf..d445c8a124 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_service.py +++ b/google/cloud/aiplatform_v1beta1/types/index_service.py @@ -495,8 +495,8 @@ class UpsertDatapointsRequest(proto.Message): Updatable fields: - - Use ``all_restricts`` to update both restricts and - numeric_restricts. + - Use ``all_restricts`` to update both restricts and + numeric_restricts. """ index: str = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/io.py b/google/cloud/aiplatform_v1beta1/types/io.py index 654a4158b5..c885e6b202 100644 --- a/google/cloud/aiplatform_v1beta1/types/io.py +++ b/google/cloud/aiplatform_v1beta1/types/io.py @@ -117,8 +117,8 @@ class BigQuerySource(proto.Message): Required. BigQuery URI to a table, up to 2000 characters long. Accepted forms: - - BigQuery path. For example: - ``bq://projectId.bqDatasetId.bqTableId``. + - BigQuery path. For example: + ``bq://projectId.bqDatasetId.bqTableId``. """ input_uri: str = proto.Field( @@ -141,9 +141,9 @@ class BigQueryDestination(proto.Message): Accepted forms: - - BigQuery path. For example: ``bq://projectId`` or - ``bq://projectId.bqDatasetId`` or - ``bq://projectId.bqDatasetId.bqTableId``.
+ - BigQuery path. For example: ``bq://projectId`` or + ``bq://projectId.bqDatasetId`` or + ``bq://projectId.bqDatasetId.bqTableId``. """ output_uri: str = proto.Field( @@ -191,11 +191,11 @@ class ContainerRegistryDestination(proto.Message): Google Container Registry and Artifact Registry are supported now. Accepted forms: - - Google Container Registry path. For example: - ``gcr.io/projectId/imageName:tag``. + - Google Container Registry path. For example: + ``gcr.io/projectId/imageName:tag``. - - Artifact Registry path. For example: - ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. + - Artifact Registry path. For example: + ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. If a tag is not specified, "latest" will be used as the default tag. diff --git a/google/cloud/aiplatform_v1beta1/types/job_service.py b/google/cloud/aiplatform_v1beta1/types/job_service.py index c1900d495c..86430e8c36 100644 --- a/google/cloud/aiplatform_v1beta1/types/job_service.py +++ b/google/cloud/aiplatform_v1beta1/types/job_service.py @@ -22,9 +22,7 @@ from google.cloud.aiplatform_v1beta1.types import ( batch_prediction_job as gca_batch_prediction_job, ) -from google.cloud.aiplatform_v1beta1.types import ( - custom_job as gca_custom_job, -) +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job from google.cloud.aiplatform_v1beta1.types import ( data_labeling_job as gca_data_labeling_job, ) @@ -145,24 +143,24 @@ class ListCustomJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -311,24 +309,24 @@ class ListDataLabelingJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. 
+ - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -492,24 +490,24 @@ class ListHyperparameterTuningJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -665,24 +663,24 @@ class ListNasJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. 
+ - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -914,26 +912,25 @@ class ListBatchPredictionJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``model_display_name`` supports ``=``, ``!=`` - comparisons. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``model_display_name`` supports ``=``, ``!=`` comparisons. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -1225,24 +1222,24 @@ class ListModelDeploymentMonitoringJobsRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. 
+ - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` - - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` - - ``NOT display_name="my_job"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``labels.keyA=valueA`` - - ``labels.keyB:*`` + - ``state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"`` + - ``state!="JOB_STATE_FAILED" OR display_name="my_job"`` + - ``NOT display_name="my_job"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``labels.keyA=valueA`` + - ``labels.keyB:*`` page_size (int): The standard list page size. page_token (str): @@ -1327,17 +1324,17 @@ class UpdateModelDeploymentMonitoringJobRequest(proto.Message): Updatable fields: - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs``, and + - ``model_deployment_monitoring_objective_configs``, or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` """ model_deployment_monitoring_job: ( diff --git a/google/cloud/aiplatform_v1beta1/types/llm_utility_service.py b/google/cloud/aiplatform_v1beta1/types/llm_utility_service.py index 8531002006..174130b284 100644 --- a/google/cloud/aiplatform_v1beta1/types/llm_utility_service.py +++ b/google/cloud/aiplatform_v1beta1/types/llm_utility_service.py @@ -49,7 +49,7 @@ class ComputeTokensRequest(proto.Message): model (str): Optional. The name of the publisher model requested to serve the prediction. Format: - projects/{project}/locations/{location}/publishers/\ */models/* + projects/{project}/locations/{location}/publishers/*/models/* contents (MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]): Optional. Input content. """ diff --git a/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/google/cloud/aiplatform_v1beta1/types/machine_resources.py index 8c248f110f..d3f010fba1 100644 --- a/google/cloud/aiplatform_v1beta1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1beta1/types/machine_resources.py @@ -454,10 +454,10 @@ class AutoscalingMetricSpec(proto.Message): metric_name (str): Required. The resource metric name.
Supported metrics: - - For Online Prediction: - - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` - - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` - - ``aiplatform.googleapis.com/prediction/online/request_count`` + - For Online Prediction: + - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` + - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` + - ``aiplatform.googleapis.com/prediction/online/request_count`` target (int): The target resource utilization in percentage (1% - 100%) for the given metric; once the real diff --git a/google/cloud/aiplatform_v1beta1/types/memory_bank.py b/google/cloud/aiplatform_v1beta1/types/memory_bank.py index 894c034052..a077f50534 100644 --- a/google/cloud/aiplatform_v1beta1/types/memory_bank.py +++ b/google/cloud/aiplatform_v1beta1/types/memory_bank.py @@ -74,7 +74,7 @@ class Memory(proto.Message): Required. Immutable. The scope of the Memory. Memories are isolated within their scope. The scope is defined when creating or generating memories. Scope values cannot contain - the wildcard character '*'. + the wildcard character '\*'. """ expire_time: timestamp_pb2.Timestamp = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/memory_bank_service.py b/google/cloud/aiplatform_v1beta1/types/memory_bank_service.py index b3b2771ea0..3b66c064c6 100644 --- a/google/cloud/aiplatform_v1beta1/types/memory_bank_service.py +++ b/google/cloud/aiplatform_v1beta1/types/memory_bank_service.py @@ -167,7 +167,7 @@ class ListMemoriesRequest(proto.Message): Supported fields (equality match only): - - ``scope`` (as a JSON string) + - ``scope`` (as a JSON string) page_size (int): Optional. The standard list page size. page_token (str): @@ -299,7 +299,7 @@ class GenerateMemoriesRequest(proto.Message): with the same scope. Must be provided unless the scope is defined in the source content. If ``scope`` is provided, it will override the scope defined in the source content. Scope - values cannot contain the wildcard character '*'. + values cannot contain the wildcard character '\*'. """ class VertexSessionSource(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_service.py b/google/cloud/aiplatform_v1beta1/types/metadata_service.py index f686f6b720..dd0700a9ae 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_service.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_service.py @@ -23,12 +23,8 @@ from google.cloud.aiplatform_v1beta1.types import context as gca_context from google.cloud.aiplatform_v1beta1.types import event from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import ( - metadata_schema as gca_metadata_schema, -) -from google.cloud.aiplatform_v1beta1.types import ( - metadata_store as gca_metadata_store, -) +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store from google.cloud.aiplatform_v1beta1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -351,25 +347,25 @@ class ListArtifactsRequest(proto.Message): define filter query is based on https://google.aip.dev/160. The supported set of filters include the following: - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``uri``, ``state``, - ``schema_title``, ``create_time``, and ``update_time``. 
- Time fields, such as ``create_time`` and ``update_time``, - require values specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"`` - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0`` In case the - field name contains special characters (such as colon), - one can embed it inside double quote. For example: - ``metadata."field:1".number_value = 10.0`` - - **Context based filtering**: To filter Artifacts based on - the contexts to which they belong, use the function - operator with the full resource name - ``in_context()``. For example: - ``in_context("projects//locations//metadataStores//contexts/")`` + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` In case the field + name contains special characters (such as colon), one can + embed it inside double quote. For example: + ``metadata."field:1".number_value = 10.0`` + - **Context based filtering**: To filter Artifacts based on + the contexts to which they belong, use the function + operator with the full resource name + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` Each of the above supported filter types can be combined together using logical operators (``AND`` & ``OR``). Maximum @@ -656,32 +652,31 @@ class ListContextsRequest(proto.Message): define filter query is based on https://google.aip.dev/160. Following are the supported set of filters: - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``schema_title``, - ``create_time``, and ``update_time``. Time fields, such - as ``create_time`` and ``update_time``, require values - specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"``. - - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0``. In case the - field name contains special characters (such as colon), - one can embed it inside double quote. For example: - ``metadata."field:1".number_value = 10.0`` - - - **Parent Child filtering**: To filter Contexts based on - parent-child relationship use the HAS operator as - follows: - - :: - - parent_contexts: - "projects//locations//metadataStores//contexts/" - child_contexts: - "projects//locations//metadataStores//contexts/" + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such as + ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. + + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0``. 
In case the + field name contains special characters (such as colon), + one can embed it inside double quote. For example: + ``metadata."field:1".number_value = 10.0`` + + - **Parent Child filtering**: To filter Contexts based on + parent-child relationship use the HAS operator as follows: + + :: + + parent_contexts: + "projects//locations//metadataStores//contexts/" + child_contexts: + "projects//locations//metadataStores//contexts/" Each of the above supported filters can be combined together using logical operators (``AND`` & ``OR``). Maximum nested @@ -1103,25 +1098,25 @@ class ListExecutionsRequest(proto.Message): https://google.aip.dev/160. Following are the supported set of filters: - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``state``, ``schema_title``, - ``create_time``, and ``update_time``. Time fields, such - as ``create_time`` and ``update_time``, require values - specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"``. - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..`` For example: - ``metadata.field_1.number_value = 10.0`` In case the - field name contains special characters (such as colon), - one can embed it inside double quote. For example: - ``metadata."field:1".number_value = 10.0`` - - **Context based filtering**: To filter Executions based - on the contexts to which they belong use the function - operator with the full resource name: - ``in_context()``. For example: - ``in_context("projects//locations//metadataStores//contexts/")`` + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``state``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such as + ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..`` For example: + ``metadata.field_1.number_value = 10.0`` In case the field + name contains special characters (such as colon), one can + embed it inside double quote. For example: + ``metadata."field:1".number_value = 10.0`` + - **Context based filtering**: To filter Executions based on + the contexts to which they belong use the function + operator with the full resource name: + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` Each of the above supported filters can be combined together using logical operators (``AND`` & ``OR``). Maximum nested @@ -1535,20 +1530,20 @@ class QueryArtifactLineageSubgraphRequest(proto.Message): https://google.aip.dev/160. The supported set of filters include the following: - - **Attribute filtering**: For example: - ``display_name = "test"`` Supported fields include: - ``name``, ``display_name``, ``uri``, ``state``, - ``schema_title``, ``create_time``, and ``update_time``. - Time fields, such as ``create_time`` and ``update_time``, - require values specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"`` - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0`` In case the - field name contains special characters (such as colon), - one can embed it inside double quote. 
For example: - ``metadata."field:1".number_value = 10.0`` + - **Attribute filtering**: For example: + ``display_name = "test"`` Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` In case the field + name contains special characters (such as colon), one can + embed it inside double quote. For example: + ``metadata."field:1".number_value = 10.0`` Each of the above supported filter types can be combined together using logical operators (``AND`` & ``OR``). Maximum diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_store.py b/google/cloud/aiplatform_v1beta1/types/metadata_store.py index 6f1ffe7924..c9b49214a4 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_store.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_store.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/migratable_resource.py b/google/cloud/aiplatform_v1beta1/types/migratable_resource.py index 8c7cc36dec..28303da4ed 100644 --- a/google/cloud/aiplatform_v1beta1/types/migratable_resource.py +++ b/google/cloud/aiplatform_v1beta1/types/migratable_resource.py @@ -80,10 +80,10 @@ class MlEngineModelVersion(proto.Message): The ml.googleapis.com endpoint that this model Version currently lives in. Example values: - - ml.googleapis.com - - us-centrall-ml.googleapis.com - - europe-west4-ml.googleapis.com - - asia-east1-ml.googleapis.com + - ml.googleapis.com + - us-central1-ml.googleapis.com + - europe-west4-ml.googleapis.com + - asia-east1-ml.googleapis.com version (str): Full resource name of ml engine model Version. Format: ``projects/{project}/models/{model}/versions/{version}``.
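The migration_service.py hunk just below documents resource-type filters such as ``ml_engine_model_version:*``. A hedged sketch of passing one of them, to show the request shape; the parent value is a placeholder, not something this diff defines::

    from google.cloud import aiplatform_v1beta1 as aip

    client = aip.MigrationServiceClient()
    request = aip.types.SearchMigratableResourcesRequest(
        parent="projects/my-project/locations/us-central1",  # hypothetical
        filter="ml_engine_model_version:*",  # only ml.googleapis.com model versions
    )
    for resource in client.search_migratable_resources(request=request):
        # Each result is a MigratableResource; its ml_engine_model_version
        # carries the endpoint and version fields shown in the hunk above.
        print(resource.ml_engine_model_version.endpoint)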
diff --git a/google/cloud/aiplatform_v1beta1/types/migration_service.py b/google/cloud/aiplatform_v1beta1/types/migration_service.py index 77aa401549..5d3b025b6b 100644 --- a/google/cloud/aiplatform_v1beta1/types/migration_service.py +++ b/google/cloud/aiplatform_v1beta1/types/migration_service.py @@ -60,23 +60,22 @@ class SearchMigratableResourcesRequest(proto.Message): A filter for your search. You can use the following types of filters: - - Resource type filters. The following strings filter for a - specific type of - [MigratableResource][google.cloud.aiplatform.v1beta1.MigratableResource]: - - - ``ml_engine_model_version:*`` - - ``automl_model:*`` - - ``automl_dataset:*`` - - ``data_labeling_dataset:*`` - - - "Migrated or not" filters. The following strings filter - for resources that either have or have not already been - migrated: - - - ``last_migrate_time:*`` filters for migrated - resources. - - ``NOT last_migrate_time:*`` filters for not yet - migrated resources. + - Resource type filters. The following strings filter for a + specific type of + [MigratableResource][google.cloud.aiplatform.v1beta1.MigratableResource]: + + - ``ml_engine_model_version:*`` + - ``automl_model:*`` + - ``automl_dataset:*`` + - ``data_labeling_dataset:*`` + + - "Migrated or not" filters. The following strings filter + for resources that either have or have not already been + migrated: + + - ``last_migrate_time:*`` filters for migrated resources. + - ``NOT last_migrate_time:*`` filters for not yet migrated + resources. """ parent: str = proto.Field( @@ -200,13 +199,13 @@ class MigrateMlEngineModelVersionConfig(proto.Message): Required. The ml.googleapis.com endpoint that this model version should be migrated from. Example values: - - ml.googleapis.com + - ml.googleapis.com - - us-centrall-ml.googleapis.com + - us-central1-ml.googleapis.com - - europe-west4-ml.googleapis.com + - europe-west4-ml.googleapis.com - - asia-east1-ml.googleapis.com + - asia-east1-ml.googleapis.com model_version (str): Required. Full resource name of ml engine model version. Format: diff --git a/google/cloud/aiplatform_v1beta1/types/model.py b/google/cloud/aiplatform_v1beta1/types/model.py index 6e7513187d..40c66131b2 100644 --- a/google/cloud/aiplatform_v1beta1/types/model.py +++ b/google/cloud/aiplatform_v1beta1/types/model.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import env_var from google.cloud.aiplatform_v1beta1.types import explanation from google.protobuf import duration_pb2 # type: ignore @@ -158,31 +156,31 @@ class Model(proto.Message): The possible formats are: - - ``jsonl`` The JSON Lines format, where each instance is a - single line. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + - ``jsonl`` The JSON Lines format, where each instance is a + single line. Uses + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - ``csv`` The CSV format, where each instance is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + - ``csv`` The CSV format, where each instance is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - ``tf-record`` The TFRecord format, where each instance is - a single record in tfrecord syntax. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + - ``tf-record`` The TFRecord format, where each instance is + a single record in tfrecord syntax. Uses + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - ``tf-record-gzip`` Similar to ``tf-record``, but the file - is gzipped. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + - ``tf-record-gzip`` Similar to ``tf-record``, but the file + is gzipped. Uses + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - ``bigquery`` Each instance is a single row in BigQuery.
- Uses - [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source]. + - ``bigquery`` Each instance is a single row in BigQuery. + Uses + [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source]. - - ``file-list`` Each line of the file is the location of an - instance to process, uses ``gcs_source`` field of the - [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig] - object. + - ``file-list`` Each line of the file is the location of an + instance to process, uses ``gcs_source`` field of the + [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig] + object. If this Model doesn't support any of these formats it means it cannot be used with a @@ -207,19 +205,19 @@ class Model(proto.Message): The possible formats are: - - ``jsonl`` The JSON Lines format, where each prediction is - a single line. Uses - [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. + - ``jsonl`` The JSON Lines format, where each prediction is + a single line. Uses + [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. - - ``csv`` The CSV format, where each prediction is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. + - ``csv`` The CSV format, where each prediction is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. - - ``bigquery`` Each prediction is a single row in a - BigQuery table, uses - [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination] - . + - ``bigquery`` Each prediction is a single row in a BigQuery + table, uses + [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination] + . If this Model doesn't support any of these formats it means it cannot be used with a @@ -357,23 +355,23 @@ class ExportFormat(proto.Message): Output only. The ID of the export format. The possible format IDs are: - - ``tflite`` Used for Android mobile devices. + - ``tflite`` Used for Android mobile devices. - - ``edgetpu-tflite`` Used for `Edge - TPU `__ devices. + - ``edgetpu-tflite`` Used for `Edge + TPU `__ devices. - - ``tf-saved-model`` A tensorflow model in SavedModel - format. + - ``tf-saved-model`` A tensorflow model in SavedModel + format. - - ``tf-js`` A - `TensorFlow.js `__ model - that can be used in the browser and in Node.js using - JavaScript. + - ``tf-js`` A + `TensorFlow.js `__ model + that can be used in the browser and in Node.js using + JavaScript. - - ``core-ml`` Used for iOS mobile devices. + - ``core-ml`` Used for iOS mobile devices. - - ``custom-trained`` A Model that was uploaded or trained - by custom code. + - ``custom-trained`` A Model that was uploaded or trained by + custom code. exportable_contents (MutableSequence[google.cloud.aiplatform_v1beta1.types.Model.ExportFormat.ExportableContent]): Output only. The content of this Model that may be exported. 
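Before the next hunk, a short sketch of how these export formats surface on a fetched model. The ``supported_export_formats`` field name is an assumption here (this hunk shows the nested ``ExportFormat`` message, not the field that holds it), and the resource name is a placeholder::

    from google.cloud import aiplatform_v1beta1 as aip

    client = aip.ModelServiceClient()
    model = client.get_model(
        name="projects/my-project/locations/us-central1/models/123"  # hypothetical
    )
    for fmt in model.supported_export_formats:  # assumed field name
        # e.g. "tf-saved-model", plus the contents it can export
        print(fmt.id, list(fmt.exportable_contents))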
@@ -945,19 +943,19 @@ class ModelContainerSpec(proto.Message): /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: - - ENDPOINT: The last segment (following ``endpoints/``)of - the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) + - ENDPOINT: The last segment (following ``endpoints/``) of + the Endpoint.name][] field of the Endpoint where this + Model has been deployed. (Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) health_route (str): Immutable. HTTP path on the container to send health checks to. Vertex AI intermittently sends GET requests to this path @@ -978,22 +976,22 @@ /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The placeholders in this value are replaced as follows: - - ENDPOINT: The last segment (following ``endpoints/``)of - the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) + - ENDPOINT: The last segment (following ``endpoints/``) of + the Endpoint.name][] field of the Endpoint where this + Model has been deployed. (Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) invoke_route_prefix (str): Immutable. Invoke route prefix for the custom container. - "/*" is the only supported value right now. By setting this + "/\*" is the only supported value right now. By setting this field, any non-root route on this model will be accessible with [PredictionService.Invoke] e.g. "/invoke/foo/bar". @@ -1250,7 +1248,7 @@ class ExecAction(proto.Message): the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, you need to + ('\|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
""" diff --git a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py index 8890f70aca..d19c919018 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import job_state diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py index 558cc7bf00..18097ff531 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py @@ -96,8 +96,8 @@ class ModelEvaluationExplanationSpec(proto.Message): For AutoML Image Classification models, possible values are: - - ``image-integrated-gradients`` - - ``image-xrai`` + - ``image-integrated-gradients`` + - ``image-xrai`` explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): Explanation spec details. """ diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py index 36f9826ad8..39ffc8e063 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py @@ -75,13 +75,13 @@ class Slice(proto.Message): Output only. The dimension of the slice. Well-known dimensions are: - - ``annotationSpec``: This slice is on the test data that - has either ground truth or prediction with - [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name] - equals to - [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value]. - - ``slice``: This slice is a user customized slice defined - by its SliceSpec. + - ``annotationSpec``: This slice is on the test data that + has either ground truth or prediction with + [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name] + equals to + [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value]. + - ``slice``: This slice is a user customized slice defined + by its SliceSpec. value (str): Output only. The value of the dimension in this slice. 
diff --git a/google/cloud/aiplatform_v1beta1/types/model_garden_service.py b/google/cloud/aiplatform_v1beta1/types/model_garden_service.py index 8ae4fbadfa..3389bdff1b 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_garden_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_garden_service.py @@ -23,9 +23,8 @@ from google.cloud.aiplatform_v1beta1.types import machine_resources from google.cloud.aiplatform_v1beta1.types import model as gca_model from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import ( - publisher_model as gca_publisher_model, -) +from google.cloud.aiplatform_v1beta1.types import publisher_model as gca_publisher_model +from google.cloud.aiplatform_v1beta1.types import service_networking __protobuf__ = proto.module( @@ -381,19 +380,25 @@ class EndpointConfig(proto.Message): {region}-aiplatform.googleapis.com. The limitations will be removed soon. dedicated_endpoint_disabled (bool): - Optional. By default, if dedicated endpoint is enabled, the - endpoint will be exposed through a dedicated DNS - [Endpoint.dedicated_endpoint_dns]. Your request to the - dedicated DNS will be isolated from other users' traffic and - will have better performance and reliability. Note: Once you - enabled dedicated endpoint, you won't be able to send - request to the shared DNS - {region}-aiplatform.googleapis.com. The limitations will be - removed soon. + Optional. By default, if dedicated endpoint is enabled and + private service connect config is not set, the endpoint will + be exposed through a dedicated DNS + [Endpoint.dedicated_endpoint_dns]. If private service + connect config is set, the endpoint will be exposed through + private service connect. Your request to the dedicated DNS + will be isolated from other users' traffic and will have + better performance and reliability. Note: Once you enabled + dedicated endpoint, you won't be able to send request to the + shared DNS {region}-aiplatform.googleapis.com. The + limitations will be removed soon. If this field is set to true, the dedicated endpoint will be disabled and the deployed model will be exposed through the shared DNS {region}-aiplatform.googleapis.com. + private_service_connect_config (google.cloud.aiplatform_v1beta1.types.PrivateServiceConnectConfig): + Optional. Configuration for private service + connect. If set, the endpoint will be exposed + through private service connect. endpoint_user_id (str): Optional. Immutable. The ID to use for endpoint, which will become the final component of the endpoint resource name. 
If @@ -425,6 +430,13 @@ class EndpointConfig(proto.Message): proto.BOOL, number=4, ) + private_service_connect_config: ( + service_networking.PrivateServiceConnectConfig + ) = proto.Field( + proto.MESSAGE, + number=5, + message=service_networking.PrivateServiceConnectConfig, + ) endpoint_user_id: str = proto.Field( proto.STRING, number=3, diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitor.py b/google/cloud/aiplatform_v1beta1/types/model_monitor.py index 00df78680f..d6affc8089 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_monitor.py +++ b/google/cloud/aiplatform_v1beta1/types/model_monitor.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import model_monitoring_spec from google.protobuf import timestamp_pb2 # type: ignore @@ -235,29 +233,29 @@ class ModelMonitoringSchema(proto.Message): Feature names of the model. Vertex AI will try to match the features from your dataset as follows: - - For 'csv' files, the header names are required, and we - will extract the corresponding feature values when the - header names align with the feature names. - - For 'jsonl' files, we will extract the corresponding - feature values if the key names match the feature names. - Note: Nested features are not supported, so please ensure - your features are flattened. Ensure the feature values - are scalar or an array of scalars. - - For 'bigquery' dataset, we will extract the corresponding - feature values if the column names match the feature - names. Note: The column type can be a scalar or an array - of scalars. STRUCT or JSON types are not supported. You - may use SQL queries to select or aggregate the relevant - features from your original table. However, ensure that - the 'schema' of the query results meets our requirements. - - For the Vertex AI Endpoint Request Response Logging table - or Vertex AI Batch Prediction Job results. If the - [instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type] - is an array, ensure that the sequence in - [feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields] - matches the order of features in the prediction instance. - We will match the feature with the array in the order - specified in [feature_fields]. + - For 'csv' files, the header names are required, and we + will extract the corresponding feature values when the + header names align with the feature names. + - For 'jsonl' files, we will extract the corresponding + feature values if the key names match the feature names. + Note: Nested features are not supported, so please ensure + your features are flattened. Ensure the feature values are + scalar or an array of scalars. + - For 'bigquery' dataset, we will extract the corresponding + feature values if the column names match the feature + names. Note: The column type can be a scalar or an array + of scalars. STRUCT or JSON types are not supported. You + may use SQL queries to select or aggregate the relevant + features from your original table. However, ensure that + the 'schema' of the query results meets our requirements. + - For the Vertex AI Endpoint Request Response Logging table + or Vertex AI Batch Prediction Job results. 
If the + [instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type] + is an array, ensure that the sequence in + [feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields] + matches the order of features in the prediction instance. + We will match the feature with the array in the order + specified in [feature_fields]. prediction_fields (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringSchema.FieldSchema]): Prediction output names of the model. The requirements are the same as the @@ -267,10 +265,10 @@ class ModelMonitoringSchema(proto.Message): ``target_column`` is the one you specified when you train the model. For Prediction output drift analysis: - - AutoML Classification, the distribution of the argmax - label will be analyzed. - - AutoML Regression, the distribution of the value will be - analyzed. + - AutoML Classification, the distribution of the argmax + label will be analyzed. + - AutoML Regression, the distribution of the value will be + analyzed. ground_truth_fields (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringSchema.FieldSchema]): Target /ground truth names of the model. """ diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring_job.py index 0028ef6525..82d5711063 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_monitoring_job.py +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring_job.py @@ -67,13 +67,13 @@ class ModelMonitoringJob(proto.Message): state (google.cloud.aiplatform_v1beta1.types.JobState): Output only. The state of the monitoring job. - - When the job is still creating, the state will be - 'JOB_STATE_PENDING'. - - Once the job is successfully created, the state will be - 'JOB_STATE_RUNNING'. - - Once the job is finished, the state will be one of - 'JOB_STATE_FAILED', 'JOB_STATE_SUCCEEDED', - 'JOB_STATE_PARTIALLY_SUCCEEDED'. + - When the job is still creating, the state will be + 'JOB_STATE_PENDING'. + - Once the job is successfully created, the state will be + 'JOB_STATE_RUNNING'. + - Once the job is finished, the state will be one of + 'JOB_STATE_FAILED', 'JOB_STATE_SUCCEEDED', + 'JOB_STATE_PARTIALLY_SUCCEEDED'. schedule (str): Output only. Schedule resource name. 
It will only appear when this job is triggered by a diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring_service.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring_service.py index 9668d07b19..230587068f 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_monitoring_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring_service.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - model_monitor as gca_model_monitor, -) +from google.cloud.aiplatform_v1beta1.types import model_monitor as gca_model_monitor from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert from google.cloud.aiplatform_v1beta1.types import ( model_monitoring_job as gca_model_monitoring_job, ) diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring_spec.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring_spec.py index e11607220c..34748e5355 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_monitoring_spec.py +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring_spec.py @@ -112,12 +112,12 @@ class DataDriftSpec(proto.Message): categorical_metric_type (str): Supported metrics type: - - l_infinity - - jensen_shannon_divergence + - l_infinity + - jensen_shannon_divergence numeric_metric_type (str): Supported metrics type: - - jensen_shannon_divergence + - jensen_shannon_divergence default_categorical_alert_condition (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertCondition): Default alert condition for all the categorical features. @@ -409,8 +409,8 @@ class ModelMonitoringBigQuerySource(proto.Message): BigQuery URI to a table, up to 2000 characters long. All the columns in the table will be selected. Accepted forms: - - BigQuery path. For example: - ``bq://projectId.bqDatasetId.bqTableId``. + - BigQuery path. For example: + ``bq://projectId.bqDatasetId.bqTableId``. This field is a member of `oneof`_ ``connection``. query (str): diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring_stats.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring_stats.py index 8914306c3a..9b532a0d97 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_monitoring_stats.py +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring_stats.py @@ -109,11 +109,11 @@ class DistributionDataValue(proto.Message): Distribution distance deviation from the current dataset's statistics to baseline dataset's statistics. - - For categorical feature, the distribution distance is - calculated by L-inifinity norm or Jensen–Shannon - divergence. - - For numerical feature, the distribution distance is - calculated by Jensen–Shannon divergence. + - For categorical feature, the distribution distance is + calculated by L-infinity norm or Jensen–Shannon + divergence. + - For numerical feature, the distribution distance is + calculated by Jensen–Shannon divergence.
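For intuition on the two distances named above, a self-contained sketch that sits outside this library's API; ``numpy`` and ``scipy`` are assumed to be installed::

    import numpy as np
    from scipy.spatial.distance import jensenshannon

    p = np.array([0.6, 0.3, 0.1])  # baseline category distribution
    q = np.array([0.4, 0.4, 0.2])  # current category distribution

    # L-infinity distance: the largest per-category probability gap.
    l_inf = np.max(np.abs(p - q))

    # SciPy returns the Jensen-Shannon *distance* (the square root of the
    # divergence), so square it to recover the divergence itself.
    js_div = jensenshannon(p, q, base=2) ** 2

    print(l_inf, js_div)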
""" distribution: struct_pb2.Value = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/model_service.py b/google/cloud/aiplatform_v1beta1/types/model_service.py index b3969651ab..da346ea6cd 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_service.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import evaluated_annotation from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import io @@ -217,25 +215,25 @@ class ListModelsRequest(proto.Message): An expression for filtering the results of the request. For field names both snake_case and camelCase are supported. - - ``model`` supports = and !=. ``model`` represents the - Model ID, i.e. the last segment of the Model's [resource - name][google.cloud.aiplatform.v1beta1.Model.name]. - - ``display_name`` supports = and != - - ``labels`` supports general map functions that is: + - ``model`` supports = and !=. ``model`` represents the + Model ID, i.e. the last segment of the Model's [resource + name][google.cloud.aiplatform.v1beta1.Model.name]. + - ``display_name`` supports = and != + - ``labels`` supports general map functions that is: - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. - - ``base_model_name`` only supports = + - ``base_model_name`` only supports = Some examples: - - ``model=1234`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - - ``baseModelName="text-bison"`` + - ``model=1234`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + - ``baseModelName="text-bison"`` page_size (int): The standard list page size. page_token (str): @@ -319,16 +317,16 @@ class ListModelVersionsRequest(proto.Message): An expression for filtering the results of the request. For field names both snake_case and camelCase are supported. - - ``labels`` supports general map functions that is: + - ``labels`` supports general map functions that is: - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. Some examples: - - ``labels.myKey="myValue"`` + - ``labels.myKey="myValue"`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. order_by (str): @@ -336,8 +334,8 @@ class ListModelVersionsRequest(proto.Message): ascending order. Use "desc" after a field name for descending. Supported fields: - - ``create_time`` - - ``update_time`` + - ``create_time`` + - ``update_time`` Example: ``update_time asc, create_time desc``. """ @@ -1133,7 +1131,7 @@ class ListModelEvaluationSlicesRequest(proto.Message): filter (str): The standard list filter. - - ``slice.dimension`` - for =. + - ``slice.dimension`` - for =. page_size (int): The standard list page size. 
page_token (str): diff --git a/google/cloud/aiplatform_v1beta1/types/nas_job.py b/google/cloud/aiplatform_v1beta1/types/nas_job.py index d06658bf0e..88bf539a65 100644 --- a/google/cloud/aiplatform_v1beta1/types/nas_job.py +++ b/google/cloud/aiplatform_v1beta1/types/nas_job.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import study from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/notebook_execution_job.py b/google/cloud/aiplatform_v1beta1/types/notebook_execution_job.py index 096dc44912..9471547ce0 100644 --- a/google/cloud/aiplatform_v1beta1/types/notebook_execution_job.py +++ b/google/cloud/aiplatform_v1beta1/types/notebook_execution_job.py @@ -19,14 +19,10 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import job_state as gca_job_state from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import ( - network_spec as gca_network_spec, -) +from google.cloud.aiplatform_v1beta1.types import network_spec as gca_network_spec from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/notebook_idle_shutdown_config.py b/google/cloud/aiplatform_v1beta1/types/notebook_idle_shutdown_config.py index 583d7a33f8..309ec4408f 100644 --- a/google/cloud/aiplatform_v1beta1/types/notebook_idle_shutdown_config.py +++ b/google/cloud/aiplatform_v1beta1/types/notebook_idle_shutdown_config.py @@ -42,7 +42,7 @@ class NotebookIdleShutdownConfig(proto.Message): - - 60. + 60. idle_shutdown_disabled (bool): Whether Idle Shutdown is disabled in this NotebookRuntimeTemplate. diff --git a/google/cloud/aiplatform_v1beta1/types/notebook_runtime.py b/google/cloud/aiplatform_v1beta1/types/notebook_runtime.py index d641d4b18d..fb18bc8e88 100644 --- a/google/cloud/aiplatform_v1beta1/types/notebook_runtime.py +++ b/google/cloud/aiplatform_v1beta1/types/notebook_runtime.py @@ -19,17 +19,11 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import ( - network_spec as gca_network_spec, -) +from google.cloud.aiplatform_v1beta1.types import network_spec as gca_network_spec from google.cloud.aiplatform_v1beta1.types import notebook_euc_config -from google.cloud.aiplatform_v1beta1.types import ( - notebook_idle_shutdown_config, -) +from google.cloud.aiplatform_v1beta1.types import notebook_idle_shutdown_config from google.cloud.aiplatform_v1beta1.types import ( notebook_runtime_template_ref as gca_notebook_runtime_template_ref, ) @@ -323,12 +317,12 @@ class NotebookRuntime(proto.Message): "aiplatform.googleapis.com/" and are immutable. 
Following system labels exist for NotebookRuntime: - - "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": - output only, its value is the Compute Engine instance id. - - "aiplatform.googleapis.com/colab_enterprise_entry_service": - its value is either "bigquery" or "vertex"; if absent, it - should be "vertex". This is to describe the entry - service, either BigQuery or Vertex. + - "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": + output only, its value is the Compute Engine instance id. + - "aiplatform.googleapis.com/colab_enterprise_entry_service": + its value is either "bigquery" or "vertex"; if absent, it + should be "vertex". This is to describe the entry service, + either BigQuery or Vertex. expiration_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this NotebookRuntime will be expired: diff --git a/google/cloud/aiplatform_v1beta1/types/notebook_service.py b/google/cloud/aiplatform_v1beta1/types/notebook_service.py index f6c6a80bdd..83ec25b51f 100644 --- a/google/cloud/aiplatform_v1beta1/types/notebook_service.py +++ b/google/cloud/aiplatform_v1beta1/types/notebook_service.py @@ -165,32 +165,32 @@ class ListNotebookRuntimeTemplatesRequest(proto.Message): request. For field names both snake_case and camelCase are supported. - - ``notebookRuntimeTemplate`` supports = and !=. - ``notebookRuntimeTemplate`` represents the - NotebookRuntimeTemplate ID, i.e. the last segment of the - NotebookRuntimeTemplate's [resource name] - [google.cloud.aiplatform.v1beta1.NotebookRuntimeTemplate.name]. - - ``display_name`` supports = and != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - - ``notebookRuntimeType`` supports = and !=. - notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. - - ``machineType`` supports = and !=. - - ``acceleratorType`` supports = and !=. + - ``notebookRuntimeTemplate`` supports = and !=. + ``notebookRuntimeTemplate`` represents the + NotebookRuntimeTemplate ID, i.e. the last segment of the + NotebookRuntimeTemplate's [resource name] + [google.cloud.aiplatform.v1beta1.NotebookRuntimeTemplate.name]. + - ``display_name`` supports = and != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + - ``notebookRuntimeType`` supports = and !=. + notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. + - ``machineType`` supports = and !=. + - ``acceleratorType`` supports = and !=. Some examples: - - ``notebookRuntimeTemplate=notebookRuntimeTemplate123`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - - ``notebookRuntimeType=USER_DEFINED`` - - ``machineType=e2-standard-4`` - - ``acceleratorType=NVIDIA_TESLA_T4`` + - ``notebookRuntimeTemplate=notebookRuntimeTemplate123`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + - ``notebookRuntimeType=USER_DEFINED`` + - ``machineType=e2-standard-4`` + - ``acceleratorType=NVIDIA_TESLA_T4`` page_size (int): Optional. The standard list page size. page_token (str): @@ -208,9 +208,9 @@ class ListNotebookRuntimeTemplatesRequest(proto.Message): sorted in ascending order. Use "desc" after a field name for descending. 
Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` Example: ``display_name, create_time desc``. """ @@ -305,7 +305,7 @@ class UpdateNotebookRuntimeTemplateRequest(proto.Message): Input format: ``{paths: "${updated_filed}"}`` Updatable fields: - - ``encryption_spec.kms_key_name`` + - ``encryption_spec.kms_key_name`` """ notebook_runtime_template: gca_notebook_runtime.NotebookRuntimeTemplate = ( @@ -421,47 +421,45 @@ class ListNotebookRuntimesRequest(proto.Message): request. For field names both snake_case and camelCase are supported. - - ``notebookRuntime`` supports = and !=. - ``notebookRuntime`` represents the NotebookRuntime ID, - i.e. the last segment of the NotebookRuntime's [resource - name] - [google.cloud.aiplatform.v1beta1.NotebookRuntime.name]. - - ``displayName`` supports = and != and regex. - - ``notebookRuntimeTemplate`` supports = and !=. - ``notebookRuntimeTemplate`` represents the - NotebookRuntimeTemplate ID, i.e. the last segment of the - NotebookRuntimeTemplate's [resource name] - [google.cloud.aiplatform.v1beta1.NotebookRuntimeTemplate.name]. - - ``healthState`` supports = and !=. healthState enum: - [HEALTHY, UNHEALTHY, HEALTH_STATE_UNSPECIFIED]. - - ``runtimeState`` supports = and !=. runtimeState enum: - [RUNTIME_STATE_UNSPECIFIED, RUNNING, BEING_STARTED, - BEING_STOPPED, STOPPED, BEING_UPGRADED, ERROR, INVALID]. - - ``runtimeUser`` supports = and !=. - - API version is UI only: ``uiState`` supports = and !=. - uiState enum: [UI_RESOURCE_STATE_UNSPECIFIED, - UI_RESOURCE_STATE_BEING_CREATED, - UI_RESOURCE_STATE_ACTIVE, - UI_RESOURCE_STATE_BEING_DELETED, - UI_RESOURCE_STATE_CREATION_FAILED]. - - ``notebookRuntimeType`` supports = and !=. - notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. - - ``machineType`` supports = and !=. - - ``acceleratorType`` supports = and !=. + - ``notebookRuntime`` supports = and !=. ``notebookRuntime`` + represents the NotebookRuntime ID, i.e. the last segment + of the NotebookRuntime's [resource name] + [google.cloud.aiplatform.v1beta1.NotebookRuntime.name]. + - ``displayName`` supports = and != and regex. + - ``notebookRuntimeTemplate`` supports = and !=. + ``notebookRuntimeTemplate`` represents the + NotebookRuntimeTemplate ID, i.e. the last segment of the + NotebookRuntimeTemplate's [resource name] + [google.cloud.aiplatform.v1beta1.NotebookRuntimeTemplate.name]. + - ``healthState`` supports = and !=. healthState enum: + [HEALTHY, UNHEALTHY, HEALTH_STATE_UNSPECIFIED]. + - ``runtimeState`` supports = and !=. runtimeState enum: + [RUNTIME_STATE_UNSPECIFIED, RUNNING, BEING_STARTED, + BEING_STOPPED, STOPPED, BEING_UPGRADED, ERROR, INVALID]. + - ``runtimeUser`` supports = and !=. + - API version is UI only: ``uiState`` supports = and !=. + uiState enum: [UI_RESOURCE_STATE_UNSPECIFIED, + UI_RESOURCE_STATE_BEING_CREATED, UI_RESOURCE_STATE_ACTIVE, + UI_RESOURCE_STATE_BEING_DELETED, + UI_RESOURCE_STATE_CREATION_FAILED]. + - ``notebookRuntimeType`` supports = and !=. + notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. + - ``machineType`` supports = and !=. + - ``acceleratorType`` supports = and !=. 
Some examples: - - ``notebookRuntime="notebookRuntime123"`` - - ``displayName="myDisplayName"`` and - ``displayName=~"myDisplayNameRegex"`` - - ``notebookRuntimeTemplate="notebookRuntimeTemplate321"`` - - ``healthState=HEALTHY`` - - ``runtimeState=RUNNING`` - - ``runtimeUser="test@google.com"`` - - ``uiState=UI_RESOURCE_STATE_BEING_DELETED`` - - ``notebookRuntimeType=USER_DEFINED`` - - ``machineType=e2-standard-4`` - - ``acceleratorType=NVIDIA_TESLA_T4`` + - ``notebookRuntime="notebookRuntime123"`` + - ``displayName="myDisplayName"`` and + ``displayName=~"myDisplayNameRegex"`` + - ``notebookRuntimeTemplate="notebookRuntimeTemplate321"`` + - ``healthState=HEALTHY`` + - ``runtimeState=RUNNING`` + - ``runtimeUser="test@google.com"`` + - ``uiState=UI_RESOURCE_STATE_BEING_DELETED`` + - ``notebookRuntimeType=USER_DEFINED`` + - ``machineType=e2-standard-4`` + - ``acceleratorType=NVIDIA_TESLA_T4`` page_size (int): Optional. The standard list page size. page_token (str): @@ -479,9 +477,9 @@ class ListNotebookRuntimesRequest(proto.Message): sorted in ascending order. Use "desc" after a field name for descending. Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` Example: ``display_name, create_time desc``. """ @@ -801,18 +799,18 @@ class ListNotebookExecutionJobsRequest(proto.Message): request. For field names both snake_case and camelCase are supported. - - ``notebookExecutionJob`` supports = and !=. - ``notebookExecutionJob`` represents the - NotebookExecutionJob ID. - - ``displayName`` supports = and != and regex. - - ``schedule`` supports = and != and regex. + - ``notebookExecutionJob`` supports = and !=. + ``notebookExecutionJob`` represents the + NotebookExecutionJob ID. + - ``displayName`` supports = and != and regex. + - ``schedule`` supports = and != and regex. Some examples: - - ``notebookExecutionJob="123"`` - - ``notebookExecutionJob="my-execution-job"`` - - ``displayName="myDisplayName"`` and - ``displayName=~"myDisplayNameRegex"`` + - ``notebookExecutionJob="123"`` + - ``notebookExecutionJob="my-execution-job"`` + - ``displayName="myDisplayName"`` and + ``displayName=~"myDisplayNameRegex"`` page_size (int): Optional. The standard list page size. page_token (str): @@ -827,9 +825,9 @@ class ListNotebookExecutionJobsRequest(proto.Message): sorted in ascending order. Use "desc" after a field name for descending. Supported fields: - - ``display_name`` - - ``create_time`` - - ``update_time`` + - ``display_name`` + - ``create_time`` + - ``update_time`` Example: ``display_name, create_time desc``. 
view (google.cloud.aiplatform_v1beta1.types.NotebookExecutionJobView): diff --git a/google/cloud/aiplatform_v1beta1/types/persistent_resource.py b/google/cloud/aiplatform_v1beta1/types/persistent_resource.py index 8596b7bd05..bfa91069c6 100644 --- a/google/cloud/aiplatform_v1beta1/types/persistent_resource.py +++ b/google/cloud/aiplatform_v1beta1/types/persistent_resource.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import machine_resources from google.cloud.aiplatform_v1beta1.types import service_networking from google.protobuf import timestamp_pb2 # type: ignore @@ -357,8 +355,8 @@ class ResourceRuntimeSpec(proto.Message): r"""Configuration for the runtime on a PersistentResource instance, including but not limited to: - - Service accounts used to run the workloads. - - Whether to make it a dedicated Ray Cluster. + - Service accounts used to run the workloads. + - Whether to make it a dedicated Ray Cluster. Attributes: service_account_spec (google.cloud.aiplatform_v1beta1.types.ServiceAccountSpec): @@ -495,9 +493,9 @@ class ServiceAccountSpec(proto.Message): service_account (str): Optional. Required when all below conditions are met - - ``enable_custom_service_account`` is true; - - any runtime is specified via ``ResourceRuntimeSpec`` on - creation time, for example, Ray + - ``enable_custom_service_account`` is true; + - any runtime is specified via ``ResourceRuntimeSpec`` on + creation time, for example, Ray The users must have ``iam.serviceAccounts.actAs`` permission on this service account and then the specified runtime diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py index c774364e72..967a6abdad 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py @@ -21,9 +21,7 @@ from google.cloud.aiplatform_v1beta1.types import artifact from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import execution as gca_execution from google.cloud.aiplatform_v1beta1.types import pipeline_failure_policy from google.cloud.aiplatform_v1beta1.types import pipeline_state @@ -94,8 +92,8 @@ class PipelineJob(proto.Message): Note there is some reserved label key for Vertex AI Pipelines. - - ``vertex-ai-pipelines-run-billing-id``, user set value - will get overrided. + - ``vertex-ai-pipelines-run-billing-id``, user-set value + will get overridden. runtime_config (google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig): Runtime config of the pipeline.
encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py index 0803e9f684..e33fdad495 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import ( - pipeline_job as gca_pipeline_job, -) +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import ( training_pipeline as gca_training_pipeline, ) @@ -123,25 +121,25 @@ class ListTrainingPipelinesRequest(proto.Message): Supported fields: - - ``display_name`` supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state`` supports ``=``, ``!=`` comparisons. - - ``training_task_definition`` ``=``, ``!=`` comparisons, - and ``:`` wildcard. - - ``create_time`` supports ``=``, ``!=``,\ ``<``, - ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must - be in RFC 3339 format. - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality \`labels.key:\* - - key existence + - ``display_name`` supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state`` supports ``=``, ``!=`` comparisons. + - ``training_task_definition`` ``=``, ``!=`` comparisons, + and ``:`` wildcard. + - ``create_time`` supports ``=``, ``!=``,\ ``<``, + ``<=``,\ ``>``, ``>=`` comparisons. ``create_time`` must + be in RFC 3339 format. + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality \`labels.key:\* + - key existence Some examples of using the filter are: - - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name:"my_pipeline_*"`` - - ``state!="PIPELINE_STATE_FAILED" OR display_name="my_pipeline"`` - - ``NOT display_name="my_pipeline"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``training_task_definition:"*automl_text_classification*"`` + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name:"my_pipeline_*"`` + - ``state!="PIPELINE_STATE_FAILED" OR display_name="my_pipeline"`` + - ``NOT display_name="my_pipeline"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``training_task_definition:"*automl_text_classification*"`` page_size (int): The standard list page size. page_token (str): @@ -306,28 +304,28 @@ class ListPipelineJobsRequest(proto.Message): Lists the PipelineJobs that match the filter expression. The following fields are supported: - - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. - - ``display_name``: Supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``pipeline_job_user_id``: Supports ``=``, ``!=`` - comparisons, and ``:`` wildcard. for example, can check - if pipeline's display_name contains *step* by doing - display_name:"*step*" - - ``state``: Supports ``=`` and ``!=`` comparisons. - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. - - ``template_uri``: Supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. 
- - ``template_metadata.version``: Supports ``=``, ``!=`` - comparisons, and ``:`` wildcard. + - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. + - ``display_name``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``pipeline_job_user_id``: Supports ``=``, ``!=`` + comparisons, and ``:`` wildcard. for example, can check if + pipeline's display_name contains *step* by doing + display_name:"*step*" + - ``state``: Supports ``=`` and ``!=`` comparisons. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``labels``: Supports key-value equality and key presence. + - ``template_uri``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``template_metadata.version``: Supports ``=``, ``!=`` + comparisons, and ``:`` wildcard. Filter expressions can be combined together using logical operators (``AND`` & ``OR``). For example: @@ -338,11 +336,11 @@ class ListPipelineJobsRequest(proto.Message): Examples: - - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` - PipelineJobs created or updated after 2020-05-18 00:00:00 - UTC. - - ``labels.env = "prod"`` PipelineJobs with label "env" set - to "prod". + - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` + PipelineJobs created or updated after 2020-05-18 00:00:00 + UTC. + - ``labels.env = "prod"`` PipelineJobs with label "env" set + to "prod". page_size (int): The standard list page size. page_token (str): @@ -364,10 +362,10 @@ class ListPipelineJobsRequest(proto.Message): default order is create time in descending order. Supported fields: - - ``create_time`` - - ``update_time`` - - ``end_time`` - - ``start_time`` + - ``create_time`` + - ``update_time`` + - ``end_time`` + - ``start_time`` read_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. """ diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index ddc6172966..7d9d7a93bf 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -610,11 +610,11 @@ class ExplainRequest(proto.Message): of the DeployedModel. Can be used for explaining prediction results with different configurations, such as: - - Explaining top-5 predictions results as opposed to top-1; - - Increasing path count or step count of the attribution - methods to reduce approximate errors; - - Using different baselines for explaining the prediction - results. + - Explaining top-5 predictions results as opposed to top-1; + - Increasing path count or step count of the attribution + methods to reduce approximate errors; + - Using different baselines for explaining the prediction + results. concurrent_explanation_spec_override (MutableMapping[str, google.cloud.aiplatform_v1beta1.types.ExplanationSpecOverride]): Optional. This field is the same as the one above, but supports multiple explanations to occur in parallel. 
The key diff --git a/google/cloud/aiplatform_v1beta1/types/reasoning_engine.py b/google/cloud/aiplatform_v1beta1/types/reasoning_engine.py index e0b797ff57..a36a23f1e0 100644 --- a/google/cloud/aiplatform_v1beta1/types/reasoning_engine.py +++ b/google/cloud/aiplatform_v1beta1/types/reasoning_engine.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import env_var from google.cloud.aiplatform_v1beta1.types import service_networking from google.protobuf import duration_pb2 # type: ignore @@ -149,11 +147,11 @@ class DeploymentSpec(proto.Message): 'memory' keys are supported. Defaults to {"cpu": "4", "memory": "4Gi"}. - - The only supported values for CPU are '1', '2', '4', and - '8'. For more information, go to - https://cloud.google.com/run/docs/configuring/cpu. - - For supported 'memory' values and syntax, go to - https://cloud.google.com/run/docs/configuring/memory-limits + - The only supported values for CPU are '1', '2', '4', and + '8'. For more information, go to + https://cloud.google.com/run/docs/configuring/cpu. + - For supported 'memory' values and syntax, go to + https://cloud.google.com/run/docs/configuring/memory-limits container_concurrency (int): Optional. Concurrency for each container and agent server. Recommended value: 2 \* cpu + 1. Defaults to 9. diff --git a/google/cloud/aiplatform_v1beta1/types/saved_query.py b/google/cloud/aiplatform_v1beta1/types/saved_query.py index da4ba3888b..11724a1e5e 100644 --- a/google/cloud/aiplatform_v1beta1/types/saved_query.py +++ b/google/cloud/aiplatform_v1beta1/types/saved_query.py @@ -57,16 +57,16 @@ class SavedQuery(proto.Message): problem_type (str): Required. Problem type of the SavedQuery. Allowed values: - - IMAGE_CLASSIFICATION_SINGLE_LABEL - - IMAGE_CLASSIFICATION_MULTI_LABEL - - IMAGE_BOUNDING_POLY - - IMAGE_BOUNDING_BOX - - TEXT_CLASSIFICATION_SINGLE_LABEL - - TEXT_CLASSIFICATION_MULTI_LABEL - - TEXT_EXTRACTION - - TEXT_SENTIMENT - - VIDEO_CLASSIFICATION - - VIDEO_OBJECT_TRACKING + - IMAGE_CLASSIFICATION_SINGLE_LABEL + - IMAGE_CLASSIFICATION_MULTI_LABEL + - IMAGE_BOUNDING_POLY + - IMAGE_BOUNDING_BOX + - TEXT_CLASSIFICATION_SINGLE_LABEL + - TEXT_CLASSIFICATION_MULTI_LABEL + - TEXT_EXTRACTION + - TEXT_SENTIMENT + - VIDEO_CLASSIFICATION + - VIDEO_OBJECT_TRACKING annotation_spec_count (int): Output only. Number of AnnotationSpecs in the context of the SavedQuery. diff --git a/google/cloud/aiplatform_v1beta1/types/schedule_service.py b/google/cloud/aiplatform_v1beta1/types/schedule_service.py index 0bd31f479a..234e9c7f7e 100644 --- a/google/cloud/aiplatform_v1beta1/types/schedule_service.py +++ b/google/cloud/aiplatform_v1beta1/types/schedule_service.py @@ -91,24 +91,24 @@ class ListSchedulesRequest(proto.Message): Lists the Schedules that match the filter expression. The following fields are supported: - - ``display_name``: Supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``state``: Supports ``=`` and ``!=`` comparisons. - - ``request``: Supports existence of the - check. (e.g. ``create_pipeline_job_request:*`` --> - Schedule has create_pipeline_job_request). - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``start_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. 
Values must be in RFC - 3339 format. - - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, ``>=`` comparisons and ``:*`` existence check. - Values must be in RFC 3339 format. - - ``next_run_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. + - ``display_name``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``state``: Supports ``=`` and ``!=`` comparisons. + - ``request``: Supports existence of the + check. (e.g. ``create_pipeline_job_request:*`` --> + Schedule has create_pipeline_job_request). + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``start_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. + - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, ``>=`` comparisons and ``:*`` existence check. + Values must be in RFC 3339 format. + - ``next_run_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 + format. Filter expressions can be combined together using logical operators (``NOT``, ``AND`` & ``OR``). The syntax to define @@ -116,11 +116,11 @@ class ListSchedulesRequest(proto.Message): Examples: - - ``state="ACTIVE" AND display_name:"my_schedule_*"`` - - ``NOT display_name="my_schedule"`` - - ``create_time>"2021-05-18T00:00:00Z"`` - - ``end_time>"2021-05-18T00:00:00Z" OR NOT end_time:*`` - - ``create_pipeline_job_request:*`` + - ``state="ACTIVE" AND display_name:"my_schedule_*"`` + - ``NOT display_name="my_schedule"`` + - ``create_time>"2021-05-18T00:00:00Z"`` + - ``end_time>"2021-05-18T00:00:00Z" OR NOT end_time:*`` + - ``create_pipeline_job_request:*`` page_size (int): The standard list page size. Default to 100 if not specified. @@ -146,10 +146,10 @@ class ListSchedulesRequest(proto.Message): Supported fields: - - ``create_time`` - - ``start_time`` - - ``end_time`` - - ``next_run_time`` + - ``create_time`` + - ``start_time`` + - ``end_time`` + - ``next_run_time`` """ parent: str = proto.Field( @@ -273,9 +273,9 @@ class UpdateScheduleRequest(proto.Message): Required. The Schedule which replaces the resource on the server. The following restrictions will be applied: - - The scheduled request type cannot be changed. - - The non-empty fields cannot be unset. - - The output_only fields will be ignored if specified. + - The scheduled request type cannot be changed. + - The non-empty fields cannot be unset. + - The output_only fields will be ignored if specified. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. See [google.protobuf.FieldMask][google.protobuf.FieldMask]. diff --git a/google/cloud/aiplatform_v1beta1/types/session.py b/google/cloud/aiplatform_v1beta1/types/session.py index 679b7d7591..31ab47c669 100644 --- a/google/cloud/aiplatform_v1beta1/types/session.py +++ b/google/cloud/aiplatform_v1beta1/types/session.py @@ -125,7 +125,7 @@ class SessionEvent(proto.Message): Attributes: name (str): Identifier. The resource name of the event. - Format:\ ``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}/events/{event}``. + Format:``projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}/sessions/{session}/events/{event}``. author (str): Required. The name of the agent that sent the event, or user. 
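The session.py hunk above corrects how the ``SessionEvent`` resource-name format is rendered in the docstring. For orientation, a minimal sketch of assembling such a name in that documented format; the helper function and all component values below are hypothetical, not part of this library:

```python
# Sketch only: build a SessionEvent resource name in the format documented
# above. The function and the sample values are hypothetical.
def session_event_name(
    project: str, location: str, reasoning_engine: str, session: str, event: str
) -> str:
    """Return a fully qualified SessionEvent resource name."""
    return (
        f"projects/{project}/locations/{location}"
        f"/reasoningEngines/{reasoning_engine}"
        f"/sessions/{session}/events/{event}"
    )


print(session_event_name("my-project", "us-central1", "123", "456", "789"))
# projects/my-project/locations/us-central1/reasoningEngines/123/sessions/456/events/789
```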
diff --git a/google/cloud/aiplatform_v1beta1/types/session_service.py b/google/cloud/aiplatform_v1beta1/types/session_service.py index 727787e4ac..ad4e10e9f6 100644 --- a/google/cloud/aiplatform_v1beta1/types/session_service.py +++ b/google/cloud/aiplatform_v1beta1/types/session_service.py @@ -129,8 +129,8 @@ class ListSessionsRequest(proto.Message): sorted in ascending order. Use "desc" after a field name for descending. Supported fields: - - ``create_time`` - - ``update_time`` + - ``create_time`` + - ``update_time`` Example: ``create_time desc``. """ diff --git a/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py b/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py index 846de86eb0..da985958f0 100644 --- a/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py +++ b/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import ( - specialist_pool as gca_specialist_pool, -) +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool from google.protobuf import field_mask_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard.py b/google/cloud/aiplatform_v1beta1/types/tensorboard.py index f617671f1c..2d2083a4fd 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.protobuf import timestamp_pb2 # type: ignore diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py index 668a434185..16a9463d47 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py @@ -65,10 +65,10 @@ class TensorboardExperiment(proto.Message): ``aiplatform.googleapis.com/`` and are immutable. The following system labels exist for each Dataset: - - ``aiplatform.googleapis.com/dataset_metadata_schema``: - output only. Its value is the - [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] - title. + - ``aiplatform.googleapis.com/dataset_metadata_schema``: + output only. Its value is the + [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] + title. etag (str): Used to perform consistent read-modify-write updates. 
If not set, a blind "overwrite" update diff --git a/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py b/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py index 2028484604..029dbbd7d2 100644 --- a/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py +++ b/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py @@ -20,16 +20,12 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard as gca_tensorboard, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard from google.cloud.aiplatform_v1beta1.types import tensorboard_data from google.cloud.aiplatform_v1beta1.types import ( tensorboard_experiment as gca_tensorboard_experiment, ) -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1beta1.types import ( tensorboard_time_series as gca_tensorboard_time_series, ) diff --git a/google/cloud/aiplatform_v1beta1/types/tool.py b/google/cloud/aiplatform_v1beta1/types/tool.py index 5ff52d65e9..e6d1f8babb 100644 --- a/google/cloud/aiplatform_v1beta1/types/tool.py +++ b/google/cloud/aiplatform_v1beta1/types/tool.py @@ -800,8 +800,17 @@ class GoogleMaps(proto.Message): r"""Tool to retrieve public maps data for grounding, powered by Google. + Attributes: + enable_widget (bool): + If true, include the widget context token in + the response. """ + enable_widget: bool = proto.Field( + proto.BOOL, + number=1, + ) + class EnterpriseWebSearch(proto.Message): r"""Tool to search public web data, powered by Vertex AI Search @@ -912,20 +921,31 @@ class Mode(proto.Enum): language response. ANY (2): Model is constrained to always predicting function calls - only. If "allowed_function_names" are set, the predicted - function calls will be limited to any one of - "allowed_function_names", else the predicted function calls - will be any one of the provided "function_declarations". + only. If + [allowed_function_names][FunctionCallingConfig.allowed_function_names] + are set, the predicted function calls will be limited to any + one of ``allowed_function_names``, else the predicted + function calls will be any one of the provided + [FunctionDeclaration]. NONE (3): Model will not predict any function calls. Model behavior is same as when not passing any function declarations. + VALIDATED (5): + Model is constrained to predict either function calls or + natural language response. If + [allowed_function_names][FunctionCallingConfig.allowed_function_names] + are set, the predicted function calls will be limited to any + one of ``allowed_function_names``, else the predicted + function calls will be any one of the provided + [FunctionDeclaration]. 
""" MODE_UNSPECIFIED = 0 AUTO = 1 ANY = 2 NONE = 3 + VALIDATED = 5 mode: Mode = proto.Field( proto.ENUM, diff --git a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py index f4bbc335a3..a9e33a27a5 100644 --- a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py @@ -19,9 +19,7 @@ import proto # type: ignore -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import pipeline_state @@ -303,19 +301,19 @@ class InputDataConfig(proto.Message): The Vertex AI environment variables representing Cloud Storage data URIs are represented in the Cloud Storage wildcard format to support sharded data. e.g.: - "gs://.../training-*.jsonl" + "gs://.../training-\*.jsonl" - - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for - tabular data + - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for + tabular data - - AIP_TRAINING_DATA_URI = - "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" + - AIP_TRAINING_DATA_URI = + "gcs_destination/dataset---/training-\*.${AIP_DATA_FORMAT}" - - AIP_VALIDATION_DATA_URI = - "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" + - AIP_VALIDATION_DATA_URI = + "gcs_destination/dataset---/validation-\*.${AIP_DATA_FORMAT}" - - AIP_TEST_DATA_URI = - "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}". + - AIP_TEST_DATA_URI = + "gcs_destination/dataset---/test-\*.${AIP_DATA_FORMAT}". This field is a member of `oneof`_ ``destination``. bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): @@ -331,16 +329,16 @@ class InputDataConfig(proto.Message): dataset three tables are created, ``training``, ``validation`` and ``test``. - - AIP_DATA_FORMAT = "bigquery". + - AIP_DATA_FORMAT = "bigquery". - - AIP_TRAINING_DATA_URI = - "bigquery_destination.dataset\_\ **\ .training" + - AIP_TRAINING_DATA_URI = + "bigquery_destination.dataset\_\ **\ .training" - - AIP_VALIDATION_DATA_URI = - "bigquery_destination.dataset\_\ **\ .validation" + - AIP_VALIDATION_DATA_URI = + "bigquery_destination.dataset\_\ **\ .validation" - - AIP_TEST_DATA_URI = - "bigquery_destination.dataset\_\ **\ .test". + - AIP_TEST_DATA_URI = + "bigquery_destination.dataset\_\ **\ .test". This field is a member of `oneof`_ ``destination``. 
dataset_id (str): diff --git a/google/cloud/aiplatform_v1beta1/types/tuning_job.py b/google/cloud/aiplatform_v1beta1/types/tuning_job.py index cc44d01089..da5ee56651 100644 --- a/google/cloud/aiplatform_v1beta1/types/tuning_job.py +++ b/google/cloud/aiplatform_v1beta1/types/tuning_job.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import content -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import evaluation_service from google.cloud.aiplatform_v1beta1.types import job_state from google.protobuf import struct_pb2 # type: ignore @@ -52,6 +50,7 @@ "EvaluationConfig", "EvaluateDatasetRun", "TunedModelCheckpoint", + "PreTunedModel", }, ) @@ -71,6 +70,10 @@ class TuningJob(proto.Message): The base model that is being tuned. See `Supported models `__. + This field is a member of `oneof`_ ``source_model``. + pre_tuned_model (google.cloud.aiplatform_v1beta1.types.PreTunedModel): + The pre-tuned model for continuous tuning. + This field is a member of `oneof`_ ``source_model``. supervised_tuning_spec (google.cloud.aiplatform_v1beta1.types.SupervisedTuningSpec): Tuning Spec for Supervised Fine Tuning. @@ -189,6 +192,12 @@ class TuningJob(proto.Message): number=4, oneof="source_model", ) + pre_tuned_model: "PreTunedModel" = proto.Field( + proto.MESSAGE, + number=31, + oneof="source_model", + message="PreTunedModel", + ) supervised_tuning_spec: "SupervisedTuningSpec" = proto.Field( proto.MESSAGE, number=5, @@ -1279,4 +1288,42 @@ class TunedModelCheckpoint(proto.Message): ) +class PreTunedModel(proto.Message): + r"""A pre-tuned model for continuous tuning. + + Attributes: + tuned_model_name (str): + The resource name of the Model. E.g., a model resource name + with a specified version id or alias: + + ``projects/{project}/locations/{location}/models/{model}@{version_id}`` + + ``projects/{project}/locations/{location}/models/{model}@{alias}`` + + Or, omit the version id to use the default version: + + ``projects/{project}/locations/{location}/models/{model}`` + checkpoint_id (str): + Optional. The source checkpoint id. If not + specified, the default checkpoint will be used. + base_model (str): + Output only. The name of the base model this + [PreTunedModel][google.cloud.aiplatform.v1beta1.PreTunedModel] + was tuned from. + """ + + tuned_model_name: str = proto.Field( + proto.STRING, + number=1, + ) + checkpoint_id: str = proto.Field( + proto.STRING, + number=2, + ) + base_model: str = proto.Field( + proto.STRING, + number=3, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/ui_pipeline_spec.py b/google/cloud/aiplatform_v1beta1/types/ui_pipeline_spec.py index 90afa95420..bebe75dff3 100644 --- a/google/cloud/aiplatform_v1beta1/types/ui_pipeline_spec.py +++ b/google/cloud/aiplatform_v1beta1/types/ui_pipeline_spec.py @@ -47,9 +47,9 @@ class ArtifactTypeSchema(proto.Message): The name of the type. The format of the title must be: ``.``. Examples: - - ``aiplatform.Model`` - - ``acme.CustomModel`` When this field is set, the type - must be pre-registered in the MLMD store. + - ``aiplatform.Model`` + - ``acme.CustomModel`` When this field is set, the type must + be pre-registered in the MLMD store. This field is a member of `oneof`_ ``kind``. 
schema_uri (str): diff --git a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py index 60743323f2..e612655438 100644 --- a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py +++ b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py @@ -20,9 +20,7 @@ import proto # type: ignore from google.cloud.aiplatform_v1beta1.types import api_auth as gca_api_auth -from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, -) +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec from google.cloud.aiplatform_v1beta1.types import io from google.protobuf import timestamp_pb2 # type: ignore @@ -1049,8 +1047,8 @@ class LayoutParser(proto.Message): ``additional_config.parse_as_scanned_pdf`` field must be false. Format: - - ``projects/{project_id}/locations/{location}/processors/{processor_id}`` - - ``projects/{project_id}/locations/{location}/processors/{processor_id}/processorVersions/{processor_version_id}`` + - ``projects/{project_id}/locations/{location}/processors/{processor_id}`` + - ``projects/{project_id}/locations/{location}/processors/{processor_id}/processorVersions/{processor_version_id}`` max_parsing_requests_per_min (int): The maximum number of requests the job is allowed to make to the Document AI processor per @@ -1089,7 +1087,7 @@ class LlmParser(proto.Message): model_name (str): The name of a LLM model used for parsing. Format: - - ``projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}`` + - ``projects/{project_id}/locations/{location}/publishers/{publisher}/models/{model}`` max_parsing_requests_per_min (int): The maximum number of requests the job is allowed to make to the LLM model per minute. @@ -1167,11 +1165,10 @@ class RagFileMetadataConfig(proto.Message): files as well as entire Google Cloud Storage directories. Sample formats: - - ``gs://bucket_name/my_directory/object_name/metadata_schema.json`` - - ``gs://bucket_name/my_directory`` If providing a - directory, the metadata schema will be read from the - files that ends with "metadata_schema.json" in the - directory. + - ``gs://bucket_name/my_directory/object_name/metadata_schema.json`` + - ``gs://bucket_name/my_directory`` If providing a + directory, the metadata schema will be read from the files + that end with "metadata_schema.json" in the directory. This field is a member of `oneof`_ ``metadata_schema_source``. google_drive_metadata_schema_source (google.cloud.aiplatform_v1beta1.types.GoogleDriveSource): @@ -1191,10 +1188,10 @@ class RagFileMetadataConfig(proto.Message): files as well as entire Google Cloud Storage directories. Sample formats: - - ``gs://bucket_name/my_directory/object_name/metadata.json`` - - ``gs://bucket_name/my_directory`` If providing a - directory, the metadata will be read from the files that - ends with "metadata.json" in the directory. + - ``gs://bucket_name/my_directory/object_name/metadata.json`` + - ``gs://bucket_name/my_directory`` If providing a + directory, the metadata will be read from the files that + end with "metadata.json" in the directory. This field is a member of `oneof`_ ``metadata_source``. google_drive_metadata_source (google.cloud.aiplatform_v1beta1.types.GoogleDriveSource): @@ -1307,8 +1304,8 @@ class ImportRagFilesConfig(proto.Message): files as well as entire Google Cloud Storage directories.
Sample formats: - - ``gs://bucket_name/my_directory/object_name/my_file.txt`` - - ``gs://bucket_name/my_directory`` + - ``gs://bucket_name/my_directory/object_name/my_file.txt`` + - ``gs://bucket_name/my_directory`` This field is a member of `oneof`_ ``import_source``. google_drive_source (google.cloud.aiplatform_v1beta1.types.GoogleDriveSource): @@ -1543,10 +1540,10 @@ class Basic(proto.Message): r"""Basic tier is a cost-effective and low compute tier suitable for the following cases: - - Experimenting with RagManagedDb. - - Small data size. - - Latency insensitive workload. - - Only using RAG Engine with external vector DBs. + - Experimenting with RagManagedDb. + - Small data size. + - Latency insensitive workload. + - Only using RAG Engine with external vector DBs. NOTE: This is the default tier if not explicitly chosen. diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index 892c2526df..5b83a56497 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.117.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index 1ace81099b..3903bbef4e 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.117.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/tests/unit/gapic/aiplatform_v1/test_data_foundry_service.py b/tests/unit/gapic/aiplatform_v1/test_data_foundry_service.py index 9a109f767e..bd00d34626 100644 --- a/tests/unit/gapic/aiplatform_v1/test_data_foundry_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_data_foundry_service.py @@ -68,9 +68,7 @@ from google.cloud.aiplatform_v1.services.data_foundry_service import ( DataFoundryServiceClient, ) -from google.cloud.aiplatform_v1.services.data_foundry_service import ( - transports, -) +from google.cloud.aiplatform_v1.services.data_foundry_service import transports from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import data_foundry_service from google.cloud.aiplatform_v1.types import tool @@ -886,10 +884,9 @@ def test_data_foundry_service_client_get_mtls_endpoint_and_cert_source(client_cl "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py index d916d3b153..edcd8bc4e5 100644 --- a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py @@ -69,9 +69,7 @@ from google.cloud.aiplatform_v1.services.dataset_service import ( DatasetServiceAsyncClient, ) -from google.cloud.aiplatform_v1.services.dataset_service 
import ( - DatasetServiceClient, -) +from google.cloud.aiplatform_v1.services.dataset_service import DatasetServiceClient from google.cloud.aiplatform_v1.services.dataset_service import pagers from google.cloud.aiplatform_v1.services.dataset_service import transports from google.cloud.aiplatform_v1.types import annotation @@ -81,9 +79,7 @@ from google.cloud.aiplatform_v1.types import dataset as gca_dataset from google.cloud.aiplatform_v1.types import dataset_service from google.cloud.aiplatform_v1.types import dataset_version -from google.cloud.aiplatform_v1.types import ( - dataset_version as gca_dataset_version, -) +from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -865,10 +861,9 @@ def test_dataset_service_client_get_mtls_endpoint_and_cert_source(client_class): "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py index e3a20d2892..b27d95d38d 100644 --- a/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py @@ -72,9 +72,7 @@ from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import ( DeploymentResourcePoolServiceClient, ) -from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import ( - pagers, -) +from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import pagers from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import ( transports, ) @@ -933,10 +931,9 @@ def test_deployment_resource_pool_service_client_get_mtls_endpoint_and_cert_sour "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py index 723e8fd066..ed70d5a2a3 100644 --- a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -69,9 +69,7 @@ from google.cloud.aiplatform_v1.services.endpoint_service import ( EndpointServiceAsyncClient, ) -from google.cloud.aiplatform_v1.services.endpoint_service import ( - EndpointServiceClient, -) +from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceClient from google.cloud.aiplatform_v1.services.endpoint_service import pagers from google.cloud.aiplatform_v1.services.endpoint_service import transports from google.cloud.aiplatform_v1.types import accelerator_type @@ -888,10 +886,9 @@ def test_endpoint_service_client_get_mtls_endpoint_and_cert_source(client_class) 
"google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @@ -1555,6 +1552,7 @@ def test_get_endpoint(request_type, transport: str = "grpc"): dedicated_endpoint_dns="dedicated_endpoint_dns_value", satisfies_pzs=True, satisfies_pzi=True, + private_model_server_enabled=True, ) response = client.get_endpoint(request) @@ -1580,6 +1578,7 @@ def test_get_endpoint(request_type, transport: str = "grpc"): assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" assert response.satisfies_pzs is True assert response.satisfies_pzi is True + assert response.private_model_server_enabled is True def test_get_endpoint_non_empty_request_with_auto_populated_field(): @@ -1716,6 +1715,7 @@ async def test_get_endpoint_async( dedicated_endpoint_dns="dedicated_endpoint_dns_value", satisfies_pzs=True, satisfies_pzi=True, + private_model_server_enabled=True, ) ) response = await client.get_endpoint(request) @@ -1742,6 +1742,7 @@ async def test_get_endpoint_async( assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" assert response.satisfies_pzs is True assert response.satisfies_pzi is True + assert response.private_model_server_enabled is True @pytest.mark.asyncio @@ -2443,6 +2444,7 @@ def test_update_endpoint(request_type, transport: str = "grpc"): dedicated_endpoint_dns="dedicated_endpoint_dns_value", satisfies_pzs=True, satisfies_pzi=True, + private_model_server_enabled=True, ) response = client.update_endpoint(request) @@ -2468,6 +2470,7 @@ def test_update_endpoint(request_type, transport: str = "grpc"): assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" assert response.satisfies_pzs is True assert response.satisfies_pzi is True + assert response.private_model_server_enabled is True def test_update_endpoint_non_empty_request_with_auto_populated_field(): @@ -2600,6 +2603,7 @@ async def test_update_endpoint_async( dedicated_endpoint_dns="dedicated_endpoint_dns_value", satisfies_pzs=True, satisfies_pzi=True, + private_model_server_enabled=True, ) ) response = await client.update_endpoint(request) @@ -2626,6 +2630,7 @@ async def test_update_endpoint_async( assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" assert response.satisfies_pzs is True assert response.satisfies_pzi is True + assert response.private_model_server_enabled is True @pytest.mark.asyncio @@ -6741,6 +6746,7 @@ async def test_get_endpoint_empty_call_grpc_asyncio(): dedicated_endpoint_dns="dedicated_endpoint_dns_value", satisfies_pzs=True, satisfies_pzi=True, + private_model_server_enabled=True, ) ) await client.get_endpoint(request=None) @@ -6805,6 +6811,7 @@ async def test_update_endpoint_empty_call_grpc_asyncio(): dedicated_endpoint_dns="dedicated_endpoint_dns_value", satisfies_pzs=True, satisfies_pzi=True, + private_model_server_enabled=True, ) ) await client.update_endpoint(request=None) @@ -7140,6 +7147,7 @@ def test_create_endpoint_rest_call_success(request_type): "satisfies_pzs": True, "satisfies_pzi": True, "gen_ai_advanced_features_config": {"rag_config": {"enable_rag": True}}, + "private_model_server_enabled": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -7348,6 +7356,7 @@ def test_get_endpoint_rest_call_success(request_type): dedicated_endpoint_dns="dedicated_endpoint_dns_value", satisfies_pzs=True, satisfies_pzi=True, + private_model_server_enabled=True, ) # Wrap the value into a proper Response obj @@ -7378,6 +7387,7 @@ def test_get_endpoint_rest_call_success(request_type): assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" assert response.satisfies_pzs is True assert response.satisfies_pzi is True + assert response.private_model_server_enabled is True @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -7765,6 +7775,7 @@ def test_update_endpoint_rest_call_success(request_type): "satisfies_pzs": True, "satisfies_pzi": True, "gen_ai_advanced_features_config": {"rag_config": {"enable_rag": True}}, + "private_model_server_enabled": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -7850,6 +7861,7 @@ def get_message_fields(field): dedicated_endpoint_dns="dedicated_endpoint_dns_value", satisfies_pzs=True, satisfies_pzi=True, + private_model_server_enabled=True, ) # Wrap the value into a proper Response obj @@ -7880,6 +7892,7 @@ def get_message_fields(field): assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" assert response.satisfies_pzs is True assert response.satisfies_pzi is True + assert response.private_model_server_enabled is True @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -9605,6 +9618,7 @@ async def test_create_endpoint_rest_asyncio_call_success(request_type): "satisfies_pzs": True, "satisfies_pzi": True, "gen_ai_advanced_features_config": {"rag_config": {"enable_rag": True}}, + "private_model_server_enabled": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -9832,6 +9846,7 @@ async def test_get_endpoint_rest_asyncio_call_success(request_type): dedicated_endpoint_dns="dedicated_endpoint_dns_value", satisfies_pzs=True, satisfies_pzi=True, + private_model_server_enabled=True, ) # Wrap the value into a proper Response obj @@ -9864,6 +9879,7 @@ async def test_get_endpoint_rest_asyncio_call_success(request_type): assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" assert response.satisfies_pzs is True assert response.satisfies_pzi is True + assert response.private_model_server_enabled is True @pytest.mark.asyncio @@ -10287,6 +10303,7 @@ async def test_update_endpoint_rest_asyncio_call_success(request_type): "satisfies_pzs": True, "satisfies_pzi": True, "gen_ai_advanced_features_config": {"rag_config": {"enable_rag": True}}, + "private_model_server_enabled": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -10372,6 +10389,7 @@ def get_message_fields(field): dedicated_endpoint_dns="dedicated_endpoint_dns_value", satisfies_pzs=True, satisfies_pzi=True, + private_model_server_enabled=True, ) # Wrap the value into a proper Response obj @@ -10404,6 +10422,7 @@ def get_message_fields(field): assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value" assert response.satisfies_pzs is True assert response.satisfies_pzi is True + assert response.private_model_server_enabled is True @pytest.mark.asyncio diff --git a/tests/unit/gapic/aiplatform_v1/test_evaluation_service.py b/tests/unit/gapic/aiplatform_v1/test_evaluation_service.py index 7ce073681d..b2bb7d7a9e 100644 --- a/tests/unit/gapic/aiplatform_v1/test_evaluation_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_evaluation_service.py @@ -876,10 +876,9 @@ def test_evaluation_service_client_get_mtls_endpoint_and_cert_source(client_clas "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py index 106937ddb1..d624292ed6 100644 --- a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py @@ -83,9 +83,7 @@ from google.cloud.aiplatform_v1.types import ( feature_online_store as gca_feature_online_store, ) -from google.cloud.aiplatform_v1.types import ( - feature_online_store_admin_service, -) +from google.cloud.aiplatform_v1.types import feature_online_store_admin_service from google.cloud.aiplatform_v1.types import feature_view from google.cloud.aiplatform_v1.types import feature_view as gca_feature_view from google.cloud.aiplatform_v1.types import feature_view_sync @@ -941,10 +939,9 @@ def test_feature_online_store_admin_service_client_get_mtls_endpoint_and_cert_so "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py index ef7b5f8a43..2c729e685c 100644 --- a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py @@ -68,9 +68,7 @@ from google.cloud.aiplatform_v1.services.feature_online_store_service import ( FeatureOnlineStoreServiceClient, ) -from google.cloud.aiplatform_v1.services.feature_online_store_service import ( - transports, -) +from google.cloud.aiplatform_v1.services.feature_online_store_service import transports from google.cloud.aiplatform_v1.types import feature_online_store_service from google.cloud.aiplatform_v1.types import featurestore_online_service from google.cloud.aiplatform_v1.types import types @@ -910,10 
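The new assertions above exercise `private_model_server_enabled`, a plain bool field added to the `Endpoint` message. A hedged usage sketch; the client construction and resource name below are placeholders, not taken from this patch:

# Hedged sketch: reading the new bool off a GetEndpoint response.
from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceClient

def endpoint_uses_private_model_server(name: str) -> bool:
    client = EndpointServiceClient()
    endpoint = client.get_endpoint(name=name)
    # Unset proto3 bools read as False, so this is safe against
    # servers that predate the field.
    return endpoint.private_model_server_enabled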
diff --git a/tests/unit/gapic/aiplatform_v1/test_evaluation_service.py b/tests/unit/gapic/aiplatform_v1/test_evaluation_service.py
index 7ce073681d..b2bb7d7a9e 100644
--- a/tests/unit/gapic/aiplatform_v1/test_evaluation_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_evaluation_service.py
@@ -876,10 +876,9 @@ def test_evaluation_service_client_get_mtls_endpoint_and_cert_source(client_clas
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py
index 106937ddb1..d624292ed6 100644
--- a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py
@@ -83,9 +83,7 @@
 from google.cloud.aiplatform_v1.types import (
     feature_online_store as gca_feature_online_store,
 )
-from google.cloud.aiplatform_v1.types import (
-    feature_online_store_admin_service,
-)
+from google.cloud.aiplatform_v1.types import feature_online_store_admin_service
 from google.cloud.aiplatform_v1.types import feature_view
 from google.cloud.aiplatform_v1.types import feature_view as gca_feature_view
 from google.cloud.aiplatform_v1.types import feature_view_sync
@@ -941,10 +939,9 @@ def test_feature_online_store_admin_service_client_get_mtls_endpoint_and_cert_so
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py
index ef7b5f8a43..2c729e685c 100644
--- a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_service.py
@@ -68,9 +68,7 @@
 from google.cloud.aiplatform_v1.services.feature_online_store_service import (
     FeatureOnlineStoreServiceClient,
 )
-from google.cloud.aiplatform_v1.services.feature_online_store_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.feature_online_store_service import transports
 from google.cloud.aiplatform_v1.types import feature_online_store_service
 from google.cloud.aiplatform_v1.types import featurestore_online_service
 from google.cloud.aiplatform_v1.types import types
@@ -910,10 +908,9 @@ def test_feature_online_store_service_client_get_mtls_endpoint_and_cert_source(
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py b/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py
index 3e098ebd22..027b2ce22a 100644
--- a/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py
@@ -72,18 +72,12 @@
 from google.cloud.aiplatform_v1.services.feature_registry_service import (
     FeatureRegistryServiceClient,
 )
-from google.cloud.aiplatform_v1.services.feature_registry_service import (
-    pagers,
-)
-from google.cloud.aiplatform_v1.services.feature_registry_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.feature_registry_service import pagers
+from google.cloud.aiplatform_v1.services.feature_registry_service import transports
 from google.cloud.aiplatform_v1.types import feature
 from google.cloud.aiplatform_v1.types import feature as gca_feature
 from google.cloud.aiplatform_v1.types import feature_group
-from google.cloud.aiplatform_v1.types import (
-    feature_group as gca_feature_group,
-)
+from google.cloud.aiplatform_v1.types import feature_group as gca_feature_group
 from google.cloud.aiplatform_v1.types import feature_monitoring_stats
 from google.cloud.aiplatform_v1.types import feature_registry_service
 from google.cloud.aiplatform_v1.types import featurestore_service
@@ -916,10 +910,9 @@ def test_feature_registry_service_client_get_mtls_endpoint_and_cert_source(
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py
index 886ded67f5..7f37b96f24 100644
--- a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py
@@ -929,10 +929,9 @@ def test_featurestore_online_serving_service_client_get_mtls_endpoint_and_cert_s
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py
index e95a10c7a7..1948a10ed6 100644
--- a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py
@@ -73,9 +73,7 @@
     FeaturestoreServiceClient,
 )
 from google.cloud.aiplatform_v1.services.featurestore_service import pagers
-from google.cloud.aiplatform_v1.services.featurestore_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.featurestore_service import transports
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import entity_type
 from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type
@@ -913,10 +911,9 @@ def test_featurestore_service_client_get_mtls_endpoint_and_cert_source(client_cl
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py
index c824da68f3..dd85130663 100644
--- a/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_gen_ai_cache_service.py
@@ -69,13 +69,9 @@
     GenAiCacheServiceClient,
 )
 from google.cloud.aiplatform_v1.services.gen_ai_cache_service import pagers
-from google.cloud.aiplatform_v1.services.gen_ai_cache_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.gen_ai_cache_service import transports
 from google.cloud.aiplatform_v1.types import cached_content
-from google.cloud.aiplatform_v1.types import (
-    cached_content as gca_cached_content,
-)
+from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content
 from google.cloud.aiplatform_v1.types import content
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import gen_ai_cache_service
@@ -892,10 +888,9 @@ def test_gen_ai_cache_service_client_get_mtls_endpoint_and_cert_source(client_cl
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
@@ -4727,7 +4722,7 @@ def test_create_cached_content_rest_call_success(request_type):
         "google_search_retrieval": {
             "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809}
         },
-        "google_maps": {},
+        "google_maps": {"enable_widget": True},
         "enterprise_web_search": {
             "exclude_domains": [
                 "exclude_domains_value1",
@@ -5232,7 +5227,7 @@ def test_update_cached_content_rest_call_success(request_type):
         "google_search_retrieval": {
             "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809}
         },
-        "google_maps": {},
+        "google_maps": {"enable_widget": True},
         "enterprise_web_search": {
             "exclude_domains": [
                 "exclude_domains_value1",
@@ -6599,7 +6594,7 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type):
         "google_search_retrieval": {
             "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809}
         },
-        "google_maps": {},
+        "google_maps": {"enable_widget": True},
         "enterprise_web_search": {
             "exclude_domains": [
                 "exclude_domains_value1",
@@ -7140,7 +7135,7 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type):
         "google_search_retrieval": {
             "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809}
         },
-        "google_maps": {},
+        "google_maps": {"enable_widget": True},
         "enterprise_web_search": {
             "exclude_domains": [
                 "exclude_domains_value1",
diff --git a/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py b/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py
index b7842ed84b..21e5de6937 100644
--- a/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py
@@ -73,9 +73,7 @@
     GenAiTuningServiceClient,
 )
 from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import pagers
-from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import transports
 from google.cloud.aiplatform_v1.types import content
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import genai_tuning_service
@@ -899,10 +897,9 @@ def test_gen_ai_tuning_service_client_get_mtls_endpoint_and_cert_source(client_c
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py
index 2ade7ea4c5..41f0436ffe 100644
--- a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py
@@ -73,15 +73,11 @@
     IndexEndpointServiceClient,
 )
 from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers
-from google.cloud.aiplatform_v1.services.index_endpoint_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.index_endpoint_service import transports
 from google.cloud.aiplatform_v1.types import accelerator_type
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import index_endpoint
-from google.cloud.aiplatform_v1.types import (
-    index_endpoint as gca_index_endpoint,
-)
+from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1.types import index_endpoint_service
 from google.cloud.aiplatform_v1.types import machine_resources
 from google.cloud.aiplatform_v1.types import operation as gca_operation
@@ -910,10 +906,9 @@ def test_index_endpoint_service_client_get_mtls_endpoint_and_cert_source(client_
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_index_service.py b/tests/unit/gapic/aiplatform_v1/test_index_service.py
index bd39d3463b..d365116d0b 100644
--- a/tests/unit/gapic/aiplatform_v1/test_index_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_index_service.py
@@ -66,12 +66,8 @@
 from google.api_core import retry as retries
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
-from google.cloud.aiplatform_v1.services.index_service import (
-    IndexServiceAsyncClient,
-)
-from google.cloud.aiplatform_v1.services.index_service import (
-    IndexServiceClient,
-)
+from google.cloud.aiplatform_v1.services.index_service import IndexServiceAsyncClient
+from google.cloud.aiplatform_v1.services.index_service import IndexServiceClient
 from google.cloud.aiplatform_v1.services.index_service import pagers
 from google.cloud.aiplatform_v1.services.index_service import transports
 from google.cloud.aiplatform_v1.types import deployed_index_ref
@@ -838,10 +834,9 @@ def test_index_service_client_get_mtls_endpoint_and_cert_source(client_class):
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py
index 35bea5ae57..685ab4f40b 100644
--- a/tests/unit/gapic/aiplatform_v1/test_job_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py
@@ -66,9 +66,7 @@
 from google.api_core import retry as retries
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
-from google.cloud.aiplatform_v1.services.job_service import (
-    JobServiceAsyncClient,
-)
+from google.cloud.aiplatform_v1.services.job_service import JobServiceAsyncClient
 from google.cloud.aiplatform_v1.services.job_service import JobServiceClient
 from google.cloud.aiplatform_v1.services.job_service import pagers
 from google.cloud.aiplatform_v1.services.job_service import transports
@@ -81,9 +79,7 @@
 from google.cloud.aiplatform_v1.types import custom_job
 from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
 from google.cloud.aiplatform_v1.types import data_labeling_job
-from google.cloud.aiplatform_v1.types import (
-    data_labeling_job as gca_data_labeling_job,
-)
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import env_var
 from google.cloud.aiplatform_v1.types import explanation
@@ -865,10 +861,9 @@ def test_job_service_client_get_mtls_endpoint_and_cert_source(client_class):
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py b/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py
index 2a6fb7f7ee..f9cfc8af09 100644
--- a/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py
@@ -68,9 +68,7 @@
 from google.cloud.aiplatform_v1.services.llm_utility_service import (
     LlmUtilityServiceClient,
 )
-from google.cloud.aiplatform_v1.services.llm_utility_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.llm_utility_service import transports
 from google.cloud.aiplatform_v1.types import content
 from google.cloud.aiplatform_v1.types import llm_utility_service
 from google.cloud.aiplatform_v1.types import openapi
@@ -884,10 +882,9 @@ def test_llm_utility_service_client_get_mtls_endpoint_and_cert_source(client_cla
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_match_service.py b/tests/unit/gapic/aiplatform_v1/test_match_service.py
index 46db0440c8..2ea2440076 100644
--- a/tests/unit/gapic/aiplatform_v1/test_match_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_match_service.py
@@ -62,12 +62,8 @@
 from google.api_core import retry as retries
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
-from google.cloud.aiplatform_v1.services.match_service import (
-    MatchServiceAsyncClient,
-)
-from google.cloud.aiplatform_v1.services.match_service import (
-    MatchServiceClient,
-)
+from google.cloud.aiplatform_v1.services.match_service import MatchServiceAsyncClient
+from google.cloud.aiplatform_v1.services.match_service import MatchServiceClient
 from google.cloud.aiplatform_v1.services.match_service import transports
 from google.cloud.aiplatform_v1.types import index
 from google.cloud.aiplatform_v1.types import match_service
@@ -826,10 +822,9 @@ def test_match_service_client_get_mtls_endpoint_and_cert_source(client_class):
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py
index 28c1d8ebde..fbe11969c7 100644
--- a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py
@@ -69,9 +69,7 @@
 from google.cloud.aiplatform_v1.services.metadata_service import (
     MetadataServiceAsyncClient,
 )
-from google.cloud.aiplatform_v1.services.metadata_service import (
-    MetadataServiceClient,
-)
+from google.cloud.aiplatform_v1.services.metadata_service import MetadataServiceClient
 from google.cloud.aiplatform_v1.services.metadata_service import pagers
 from google.cloud.aiplatform_v1.services.metadata_service import transports
 from google.cloud.aiplatform_v1.types import artifact
@@ -84,14 +82,10 @@
 from google.cloud.aiplatform_v1.types import execution as gca_execution
 from google.cloud.aiplatform_v1.types import lineage_subgraph
 from google.cloud.aiplatform_v1.types import metadata_schema
-from google.cloud.aiplatform_v1.types import (
-    metadata_schema as gca_metadata_schema,
-)
+from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
 from google.cloud.aiplatform_v1.types import metadata_service
 from google.cloud.aiplatform_v1.types import metadata_store
-from google.cloud.aiplatform_v1.types import (
-    metadata_store as gca_metadata_store,
-)
+from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store
 from google.cloud.aiplatform_v1.types import operation as gca_operation
 from google.cloud.location import locations_pb2
 from google.iam.v1 import iam_policy_pb2  # type: ignore
@@ -894,10 +888,9 @@ def test_metadata_service_client_get_mtls_endpoint_and_cert_source(client_class)
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py
index 9061ef4f8b..ace13a6912 100644
--- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py
@@ -69,9 +69,7 @@
 from google.cloud.aiplatform_v1.services.migration_service import (
     MigrationServiceAsyncClient,
 )
-from google.cloud.aiplatform_v1.services.migration_service import (
-    MigrationServiceClient,
-)
+from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceClient
 from google.cloud.aiplatform_v1.services.migration_service import pagers
 from google.cloud.aiplatform_v1.services.migration_service import transports
 from google.cloud.aiplatform_v1.types import migratable_resource
@@ -874,10 +872,9 @@ def test_migration_service_client_get_mtls_endpoint_and_cert_source(client_class
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py b/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py
index 9f688a62b5..a25100b080 100644
--- a/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py
@@ -72,9 +72,7 @@
 from google.cloud.aiplatform_v1.services.model_garden_service import (
     ModelGardenServiceClient,
 )
-from google.cloud.aiplatform_v1.services.model_garden_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.model_garden_service import transports
 from google.cloud.aiplatform_v1.types import accelerator_type
 from google.cloud.aiplatform_v1.types import env_var
 from google.cloud.aiplatform_v1.types import machine_resources
@@ -893,10 +891,9 @@ def test_model_garden_service_client_get_mtls_endpoint_and_cert_source(client_cl
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_model_service.py b/tests/unit/gapic/aiplatform_v1/test_model_service.py
index e811b181d1..cff833ae55 100644
--- a/tests/unit/gapic/aiplatform_v1/test_model_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_model_service.py
@@ -66,12 +66,8 @@
 from google.api_core import retry as retries
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
-from google.cloud.aiplatform_v1.services.model_service import (
-    ModelServiceAsyncClient,
-)
-from google.cloud.aiplatform_v1.services.model_service import (
-    ModelServiceClient,
-)
+from google.cloud.aiplatform_v1.services.model_service import ModelServiceAsyncClient
+from google.cloud.aiplatform_v1.services.model_service import ModelServiceClient
 from google.cloud.aiplatform_v1.services.model_service import pagers
 from google.cloud.aiplatform_v1.services.model_service import transports
 from google.cloud.aiplatform_v1.types import deployed_model_ref
@@ -84,9 +80,7 @@
 from google.cloud.aiplatform_v1.types import model
 from google.cloud.aiplatform_v1.types import model as gca_model
 from google.cloud.aiplatform_v1.types import model_evaluation
-from google.cloud.aiplatform_v1.types import (
-    model_evaluation as gca_model_evaluation,
-)
+from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation
 from google.cloud.aiplatform_v1.types import model_evaluation_slice
 from google.cloud.aiplatform_v1.types import model_service
 from google.cloud.aiplatform_v1.types import operation as gca_operation
@@ -850,10 +844,9 @@ def test_model_service_client_get_mtls_endpoint_and_cert_source(client_class):
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_notebook_service.py b/tests/unit/gapic/aiplatform_v1/test_notebook_service.py
index 93f2dd4480..463536f05b 100644
--- a/tests/unit/gapic/aiplatform_v1/test_notebook_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_notebook_service.py
@@ -69,9 +69,7 @@
 from google.cloud.aiplatform_v1.services.notebook_service import (
     NotebookServiceAsyncClient,
 )
-from google.cloud.aiplatform_v1.services.notebook_service import (
-    NotebookServiceClient,
-)
+from google.cloud.aiplatform_v1.services.notebook_service import NotebookServiceClient
 from google.cloud.aiplatform_v1.services.notebook_service import pagers
 from google.cloud.aiplatform_v1.services.notebook_service import transports
 from google.cloud.aiplatform_v1.types import accelerator_type
@@ -87,9 +85,7 @@
 )
 from google.cloud.aiplatform_v1.types import notebook_idle_shutdown_config
 from google.cloud.aiplatform_v1.types import notebook_runtime
-from google.cloud.aiplatform_v1.types import (
-    notebook_runtime as gca_notebook_runtime,
-)
+from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime
 from google.cloud.aiplatform_v1.types import notebook_runtime_template_ref
 from google.cloud.aiplatform_v1.types import notebook_service
 from google.cloud.aiplatform_v1.types import notebook_software_config
@@ -898,10 +894,9 @@ def test_notebook_service_client_get_mtls_endpoint_and_cert_source(client_class)
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py b/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py
index a2f0c19303..bee6cd7798 100644
--- a/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py
@@ -72,12 +72,8 @@
 from google.cloud.aiplatform_v1.services.persistent_resource_service import (
     PersistentResourceServiceClient,
 )
-from google.cloud.aiplatform_v1.services.persistent_resource_service import (
-    pagers,
-)
-from google.cloud.aiplatform_v1.services.persistent_resource_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.persistent_resource_service import pagers
+from google.cloud.aiplatform_v1.services.persistent_resource_service import transports
 from google.cloud.aiplatform_v1.types import accelerator_type
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import machine_resources
@@ -927,10 +923,9 @@ def test_persistent_resource_service_client_get_mtls_endpoint_and_cert_source(
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py
index 08ca5e6617..76423865a7 100644
--- a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py
@@ -69,9 +69,7 @@
 from google.cloud.aiplatform_v1.services.pipeline_service import (
     PipelineServiceAsyncClient,
 )
-from google.cloud.aiplatform_v1.services.pipeline_service import (
-    PipelineServiceClient,
-)
+from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceClient
 from google.cloud.aiplatform_v1.services.pipeline_service import pagers
 from google.cloud.aiplatform_v1.services.pipeline_service import transports
 from google.cloud.aiplatform_v1.types import artifact
@@ -92,9 +90,7 @@
 from google.cloud.aiplatform_v1.types import pipeline_state
 from google.cloud.aiplatform_v1.types import service_networking
 from google.cloud.aiplatform_v1.types import training_pipeline
-from google.cloud.aiplatform_v1.types import (
-    training_pipeline as gca_training_pipeline,
-)
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
 from google.cloud.aiplatform_v1.types import value
 from google.cloud.location import locations_pb2
 from google.iam.v1 import iam_policy_pb2  # type: ignore
@@ -900,10 +896,9 @@ def test_pipeline_service_client_get_mtls_endpoint_and_cert_source(client_class)
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py
index d39076f195..03256746a8 100644
--- a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py
@@ -888,10 +888,9 @@ def test_prediction_service_client_get_mtls_endpoint_and_cert_source(client_clas
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_reasoning_engine_execution_service.py b/tests/unit/gapic/aiplatform_v1/test_reasoning_engine_execution_service.py
index 80cd3804b0..423d0ee4a1 100644
--- a/tests/unit/gapic/aiplatform_v1/test_reasoning_engine_execution_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_reasoning_engine_execution_service.py
@@ -72,9 +72,7 @@
 from google.cloud.aiplatform_v1.services.reasoning_engine_execution_service import (
     transports,
 )
-from google.cloud.aiplatform_v1.types import (
-    reasoning_engine_execution_service,
-)
+from google.cloud.aiplatform_v1.types import reasoning_engine_execution_service
 from google.cloud.location import locations_pb2
 from google.iam.v1 import iam_policy_pb2  # type: ignore
 from google.iam.v1 import options_pb2  # type: ignore
@@ -928,10 +926,9 @@ def test_reasoning_engine_execution_service_client_get_mtls_endpoint_and_cert_so
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_reasoning_engine_service.py b/tests/unit/gapic/aiplatform_v1/test_reasoning_engine_service.py
index dfb69c2e85..69f3dfc529 100644
--- a/tests/unit/gapic/aiplatform_v1/test_reasoning_engine_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_reasoning_engine_service.py
@@ -72,19 +72,13 @@
     ReasoningEngineServiceClient,
 )
-from google.cloud.aiplatform_v1.services.reasoning_engine_service import (
-    pagers,
-)
-from google.cloud.aiplatform_v1.services.reasoning_engine_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.reasoning_engine_service import pagers
+from google.cloud.aiplatform_v1.services.reasoning_engine_service import transports
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import env_var
 from google.cloud.aiplatform_v1.types import operation as gca_operation
 from google.cloud.aiplatform_v1.types import reasoning_engine
-from google.cloud.aiplatform_v1.types import (
-    reasoning_engine as gca_reasoning_engine,
-)
+from google.cloud.aiplatform_v1.types import reasoning_engine as gca_reasoning_engine
 from google.cloud.aiplatform_v1.types import reasoning_engine_service
 from google.cloud.aiplatform_v1.types import service_networking
 from google.cloud.location import locations_pb2
@@ -915,10 +909,9 @@ def test_reasoning_engine_service_client_get_mtls_endpoint_and_cert_source(
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_schedule_service.py b/tests/unit/gapic/aiplatform_v1/test_schedule_service.py
index 15586d948d..b51c1243c6 100644
--- a/tests/unit/gapic/aiplatform_v1/test_schedule_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_schedule_service.py
@@ -69,9 +69,7 @@
 from google.cloud.aiplatform_v1.services.schedule_service import (
     ScheduleServiceAsyncClient,
 )
-from google.cloud.aiplatform_v1.services.schedule_service import (
-    ScheduleServiceClient,
-)
+from google.cloud.aiplatform_v1.services.schedule_service import ScheduleServiceClient
 from google.cloud.aiplatform_v1.services.schedule_service import pagers
 from google.cloud.aiplatform_v1.services.schedule_service import transports
 from google.cloud.aiplatform_v1.types import accelerator_type
@@ -899,10 +897,9 @@ def test_schedule_service_client_get_mtls_endpoint_and_cert_source(client_class)
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py
index ed02f6e458..fc78eb5bfa 100644
--- a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py
@@ -72,17 +72,11 @@
     SpecialistPoolServiceClient,
 )
-from google.cloud.aiplatform_v1.services.specialist_pool_service import (
-    pagers,
-)
-from google.cloud.aiplatform_v1.services.specialist_pool_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers
+from google.cloud.aiplatform_v1.services.specialist_pool_service import transports
 from google.cloud.aiplatform_v1.types import operation as gca_operation
 from google.cloud.aiplatform_v1.types import specialist_pool
-from google.cloud.aiplatform_v1.types import (
-    specialist_pool as gca_specialist_pool,
-)
+from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
 from google.cloud.aiplatform_v1.types import specialist_pool_service
 from google.cloud.location import locations_pb2
 from google.iam.v1 import iam_policy_pb2  # type: ignore
@@ -908,10 +902,9 @@ def test_specialist_pool_service_client_get_mtls_endpoint_and_cert_source(client
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py
index 42989535be..e1380c93b2 100644
--- a/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py
@@ -73,9 +73,7 @@
     TensorboardServiceClient,
 )
 from google.cloud.aiplatform_v1.services.tensorboard_service import pagers
-from google.cloud.aiplatform_v1.services.tensorboard_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.tensorboard_service import transports
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import operation as gca_operation
 from google.cloud.aiplatform_v1.types import tensorboard
@@ -86,9 +84,7 @@
     tensorboard_experiment as gca_tensorboard_experiment,
 )
 from google.cloud.aiplatform_v1.types import tensorboard_run
-from google.cloud.aiplatform_v1.types import (
-    tensorboard_run as gca_tensorboard_run,
-)
+from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run
 from google.cloud.aiplatform_v1.types import tensorboard_service
 from google.cloud.aiplatform_v1.types import tensorboard_time_series
 from google.cloud.aiplatform_v1.types import (
@@ -907,10 +903,9 @@ def test_tensorboard_service_client_get_mtls_endpoint_and_cert_source(client_cla
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_vertex_rag_data_service.py b/tests/unit/gapic/aiplatform_v1/test_vertex_rag_data_service.py
index bc6d7b1274..d5a894ac34 100644
--- a/tests/unit/gapic/aiplatform_v1/test_vertex_rag_data_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_vertex_rag_data_service.py
@@ -72,12 +72,8 @@
 from google.cloud.aiplatform_v1.services.vertex_rag_data_service import (
     VertexRagDataServiceClient,
 )
-from google.cloud.aiplatform_v1.services.vertex_rag_data_service import (
-    pagers,
-)
-from google.cloud.aiplatform_v1.services.vertex_rag_data_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1.services.vertex_rag_data_service import pagers
+from google.cloud.aiplatform_v1.services.vertex_rag_data_service import transports
 from google.cloud.aiplatform_v1.types import api_auth
 from google.cloud.aiplatform_v1.types import encryption_spec
 from google.cloud.aiplatform_v1.types import io
@@ -907,10 +903,9 @@ def test_vertex_rag_data_service_client_get_mtls_endpoint_and_cert_source(client
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_vertex_rag_service.py b/tests/unit/gapic/aiplatform_v1/test_vertex_rag_service.py
index df2cd735a8..88ae296742 100644
--- a/tests/unit/gapic/aiplatform_v1/test_vertex_rag_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_vertex_rag_service.py
@@ -874,10 +874,9 @@ def test_vertex_rag_service_client_get_mtls_endpoint_and_cert_source(client_clas
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py
index 58175ba72f..f043b67e84 100644
--- a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py
@@ -66,12 +66,8 @@
 from google.api_core import retry as retries
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
-from google.cloud.aiplatform_v1.services.vizier_service import (
-    VizierServiceAsyncClient,
-)
-from google.cloud.aiplatform_v1.services.vizier_service import (
-    VizierServiceClient,
-)
+from google.cloud.aiplatform_v1.services.vizier_service import VizierServiceAsyncClient
+from google.cloud.aiplatform_v1.services.vizier_service import VizierServiceClient
 from google.cloud.aiplatform_v1.services.vizier_service import pagers
 from google.cloud.aiplatform_v1.services.vizier_service import transports
 from google.cloud.aiplatform_v1.types import study
@@ -850,10 +846,9 @@ def test_vizier_service_client_get_mtls_endpoint_and_cert_source(client_class):
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py
index 303528e457..53be82c6c5 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py
@@ -73,9 +73,7 @@
     DatasetServiceClient,
 )
 from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers
-from google.cloud.aiplatform_v1beta1.services.dataset_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1beta1.services.dataset_service import transports
 from google.cloud.aiplatform_v1beta1.types import annotation
 from google.cloud.aiplatform_v1beta1.types import annotation_spec
 from google.cloud.aiplatform_v1beta1.types import content
@@ -84,9 +82,7 @@
 from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset
 from google.cloud.aiplatform_v1beta1.types import dataset_service
 from google.cloud.aiplatform_v1beta1.types import dataset_version
-from google.cloud.aiplatform_v1beta1.types import (
-    dataset_version as gca_dataset_version,
-)
+from google.cloud.aiplatform_v1beta1.types import dataset_version as gca_dataset_version
 from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import io
 from google.cloud.aiplatform_v1beta1.types import openapi
@@ -872,10 +868,9 @@ def test_dataset_service_client_get_mtls_endpoint_and_cert_source(client_class):
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py
index ec79aa49ae..611e3ed190 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py
@@ -83,9 +83,7 @@
 from google.cloud.aiplatform_v1beta1.types import (
     deployment_resource_pool as gca_deployment_resource_pool,
 )
-from google.cloud.aiplatform_v1beta1.types import (
-    deployment_resource_pool_service,
-)
+from google.cloud.aiplatform_v1beta1.types import deployment_resource_pool_service
 from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import endpoint
 from google.cloud.aiplatform_v1beta1.types import machine_resources
@@ -936,10 +934,9 @@ def test_deployment_resource_pool_service_client_get_mtls_endpoint_and_cert_sour
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py
index ab1205dcd1..d76e16c503 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py
@@ -73,9 +73,7 @@
     EndpointServiceClient,
 )
 from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers
-from google.cloud.aiplatform_v1beta1.services.endpoint_service import (
-    transports,
-)
+from google.cloud.aiplatform_v1beta1.services.endpoint_service import transports
 from google.cloud.aiplatform_v1beta1.types import accelerator_type
 from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import endpoint
@@ -890,10 +888,9 @@ def test_endpoint_service_client_get_mtls_endpoint_and_cert_source(client_class)
         "google.auth.transport.mtls.default_client_cert_source",
         return_value=mock_client_cert_source,
     ):
-        (
-            api_endpoint,
-            cert_source,
-        ) = client_class.get_mtls_endpoint_and_cert_source()
+        api_endpoint, cert_source = (
+            client_class.get_mtls_endpoint_and_cert_source()
+        )
         assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
         assert cert_source == mock_client_cert_source
@@ -1557,6 +1554,7 @@ def test_get_endpoint(request_type, transport: str = "grpc"):
             dedicated_endpoint_dns="dedicated_endpoint_dns_value",
             satisfies_pzs=True,
             satisfies_pzi=True,
+            private_model_server_enabled=True,
         )
         response = client.get_endpoint(request)
@@ -1582,6 +1580,7 @@ def test_get_endpoint(request_type, transport: str = "grpc"):
     assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value"
     assert response.satisfies_pzs is True
     assert response.satisfies_pzi is True
+    assert response.private_model_server_enabled is True


 def test_get_endpoint_non_empty_request_with_auto_populated_field():
@@ -1718,6 +1717,7 @@ async def test_get_endpoint_async(
                 dedicated_endpoint_dns="dedicated_endpoint_dns_value",
                 satisfies_pzs=True,
                 satisfies_pzi=True,
+                private_model_server_enabled=True,
             )
         )
         response = await client.get_endpoint(request)
@@ -1744,6 +1744,7 @@ async def test_get_endpoint_async(
     assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value"
     assert response.satisfies_pzs is True
     assert response.satisfies_pzi is True
+    assert response.private_model_server_enabled is True


 @pytest.mark.asyncio
@@ -2443,6 +2444,7 @@ def test_update_endpoint(request_type, transport: str = "grpc"):
             dedicated_endpoint_dns="dedicated_endpoint_dns_value",
             satisfies_pzs=True,
             satisfies_pzi=True,
+            private_model_server_enabled=True,
         )
         response = client.update_endpoint(request)
@@ -2468,6 +2470,7 @@ def test_update_endpoint(request_type, transport: str = "grpc"):
     assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value"
     assert response.satisfies_pzs is True
     assert response.satisfies_pzi is True
+    assert response.private_model_server_enabled is True


 def test_update_endpoint_non_empty_request_with_auto_populated_field():
@@ -2600,6 +2603,7 @@ async def test_update_endpoint_async(
                 dedicated_endpoint_dns="dedicated_endpoint_dns_value",
                 satisfies_pzs=True,
                 satisfies_pzi=True,
+                private_model_server_enabled=True,
             )
         )
         response = await client.update_endpoint(request)
@@ -2626,6 +2630,7 @@ async def test_update_endpoint_async(
     assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value"
     assert response.satisfies_pzs is True
     assert response.satisfies_pzi is True
+    assert response.private_model_server_enabled is True


 @pytest.mark.asyncio
@@ -7891,6 +7896,7 @@ async def test_get_endpoint_empty_call_grpc_asyncio():
                 dedicated_endpoint_dns="dedicated_endpoint_dns_value",
                 satisfies_pzs=True,
                 satisfies_pzi=True,
+                private_model_server_enabled=True,
             )
         )
         await client.get_endpoint(request=None)
@@ -7955,6 +7961,7 @@ async def test_update_endpoint_empty_call_grpc_asyncio():
                 dedicated_endpoint_dns="dedicated_endpoint_dns_value",
                 satisfies_pzs=True,
                 satisfies_pzi=True,
+                private_model_server_enabled=True,
             )
         )
         await client.update_endpoint(request=None)
@@ -8364,6 +8371,7 @@ def test_create_endpoint_rest_call_success(request_type):
         "satisfies_pzs": True,
         "satisfies_pzi": True,
         "gen_ai_advanced_features_config": {"rag_config": {"enable_rag": True}},
+        "private_model_server_enabled": True,
     }
     # The version of a generated dependency at test runtime may differ from the version used during generation.
     # Delete any fields which are not present in the current runtime dependency
@@ -8572,6 +8580,7 @@ def test_get_endpoint_rest_call_success(request_type):
         dedicated_endpoint_dns="dedicated_endpoint_dns_value",
         satisfies_pzs=True,
         satisfies_pzi=True,
+        private_model_server_enabled=True,
     )

     # Wrap the value into a proper Response obj
@@ -8602,6 +8611,7 @@ def test_get_endpoint_rest_call_success(request_type):
     assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value"
     assert response.satisfies_pzs is True
     assert response.satisfies_pzi is True
+    assert response.private_model_server_enabled is True


 @pytest.mark.parametrize("null_interceptor", [True, False])
@@ -9009,6 +9019,7 @@ def test_update_endpoint_rest_call_success(request_type):
         "satisfies_pzs": True,
         "satisfies_pzi": True,
         "gen_ai_advanced_features_config": {"rag_config": {"enable_rag": True}},
+        "private_model_server_enabled": True,
     }
     # The version of a generated dependency at test runtime may differ from the version used during generation.
     # Delete any fields which are not present in the current runtime dependency
@@ -9094,6 +9105,7 @@ def get_message_fields(field):
         dedicated_endpoint_dns="dedicated_endpoint_dns_value",
         satisfies_pzs=True,
         satisfies_pzi=True,
+        private_model_server_enabled=True,
     )

     # Wrap the value into a proper Response obj
@@ -9124,6 +9136,7 @@ def get_message_fields(field):
     assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value"
     assert response.satisfies_pzs is True
     assert response.satisfies_pzi is True
+    assert response.private_model_server_enabled is True


 @pytest.mark.parametrize("null_interceptor", [True, False])
@@ -11172,6 +11185,7 @@ async def test_create_endpoint_rest_asyncio_call_success(request_type):
         "satisfies_pzs": True,
         "satisfies_pzi": True,
         "gen_ai_advanced_features_config": {"rag_config": {"enable_rag": True}},
+        "private_model_server_enabled": True,
     }
     # The version of a generated dependency at test runtime may differ from the version used during generation.
     # Delete any fields which are not present in the current runtime dependency
@@ -11399,6 +11413,7 @@ async def test_get_endpoint_rest_asyncio_call_success(request_type):
         dedicated_endpoint_dns="dedicated_endpoint_dns_value",
         satisfies_pzs=True,
         satisfies_pzi=True,
+        private_model_server_enabled=True,
     )

     # Wrap the value into a proper Response obj
@@ -11431,6 +11446,7 @@ async def test_get_endpoint_rest_asyncio_call_success(request_type):
     assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value"
     assert response.satisfies_pzs is True
     assert response.satisfies_pzi is True
+    assert response.private_model_server_enabled is True


 @pytest.mark.asyncio
@@ -11874,6 +11890,7 @@ async def test_update_endpoint_rest_asyncio_call_success(request_type):
         "satisfies_pzs": True,
         "satisfies_pzi": True,
         "gen_ai_advanced_features_config": {"rag_config": {"enable_rag": True}},
+        "private_model_server_enabled": True,
     }
     # The version of a generated dependency at test runtime may differ from the version used during generation.
     # Delete any fields which are not present in the current runtime dependency
@@ -11959,6 +11976,7 @@ def get_message_fields(field):
         dedicated_endpoint_dns="dedicated_endpoint_dns_value",
         satisfies_pzs=True,
         satisfies_pzi=True,
+        private_model_server_enabled=True,
     )

     # Wrap the value into a proper Response obj
@@ -11991,6 +12009,7 @@ def get_message_fields(field):
     assert response.dedicated_endpoint_dns == "dedicated_endpoint_dns_value"
     assert response.satisfies_pzs is True
     assert response.satisfies_pzi is True
+    assert response.private_model_server_enabled is True


 @pytest.mark.asyncio
client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py index 6f1f00c412..5082f862d1 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py @@ -910,10 +910,9 @@ def test_extension_execution_service_client_get_mtls_endpoint_and_cert_source( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py index 95c2b32956..b6fdb6b86f 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py @@ -72,9 +72,7 @@ from google.cloud.aiplatform_v1beta1.services.extension_registry_service import ( ExtensionRegistryServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.extension_registry_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.extension_registry_service import pagers from google.cloud.aiplatform_v1beta1.services.extension_registry_service import ( transports, ) @@ -917,10 +915,9 @@ def test_extension_registry_service_client_get_mtls_endpoint_and_cert_source( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py index 6725905cce..44e7f9ab9f 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py @@ -83,13 +83,9 @@ from google.cloud.aiplatform_v1beta1.types import ( feature_online_store as gca_feature_online_store, ) -from google.cloud.aiplatform_v1beta1.types import ( - feature_online_store_admin_service, -) +from google.cloud.aiplatform_v1beta1.types import feature_online_store_admin_service from google.cloud.aiplatform_v1beta1.types import feature_view -from google.cloud.aiplatform_v1beta1.types import ( - feature_view as gca_feature_view, -) +from google.cloud.aiplatform_v1beta1.types import feature_view as gca_feature_view from google.cloud.aiplatform_v1beta1.types import feature_view_sync from google.cloud.aiplatform_v1beta1.types import machine_resources from google.cloud.aiplatform_v1beta1.types import operation as gca_operation @@ -943,10 +939,9 @@ def test_feature_online_store_admin_service_client_get_mtls_endpoint_and_cert_so "google.auth.transport.mtls.default_client_cert_source", 
return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_service.py index 0de8b7f8e5..571a4b1c2b 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_service.py @@ -71,9 +71,7 @@ from google.cloud.aiplatform_v1beta1.services.feature_online_store_service import ( transports, ) -from google.cloud.aiplatform_v1beta1.types import ( - feature_online_store_service, -) +from google.cloud.aiplatform_v1beta1.types import feature_online_store_service from google.cloud.aiplatform_v1beta1.types import featurestore_online_service from google.cloud.aiplatform_v1beta1.types import types from google.cloud.location import locations_pb2 @@ -912,10 +910,9 @@ def test_feature_online_store_service_client_get_mtls_endpoint_and_cert_source( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py index 2ad6c17495..eadbf9a8ad 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py @@ -72,22 +72,14 @@ from google.cloud.aiplatform_v1beta1.services.feature_registry_service import ( FeatureRegistryServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.feature_registry_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.feature_registry_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.feature_registry_service import pagers +from google.cloud.aiplatform_v1beta1.services.feature_registry_service import transports from google.cloud.aiplatform_v1beta1.types import feature from google.cloud.aiplatform_v1beta1.types import feature as gca_feature from google.cloud.aiplatform_v1beta1.types import feature_group -from google.cloud.aiplatform_v1beta1.types import ( - feature_group as gca_feature_group, -) +from google.cloud.aiplatform_v1beta1.types import feature_group as gca_feature_group from google.cloud.aiplatform_v1beta1.types import feature_monitor -from google.cloud.aiplatform_v1beta1.types import ( - feature_monitor as gca_feature_monitor, -) +from google.cloud.aiplatform_v1beta1.types import feature_monitor as gca_feature_monitor from google.cloud.aiplatform_v1beta1.types import feature_monitor_job from google.cloud.aiplatform_v1beta1.types import ( feature_monitor_job as gca_feature_monitor_job, @@ -930,10 +922,9 @@ def test_feature_registry_service_client_get_mtls_endpoint_and_cert_source( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + 
api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py index b72026cefe..48a5ed002c 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py @@ -929,10 +929,9 @@ def test_featurestore_online_serving_service_client_get_mtls_endpoint_and_cert_s "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py index 7498e1af48..acca5079c3 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -72,26 +72,18 @@ from google.cloud.aiplatform_v1beta1.services.featurestore_service import ( FeaturestoreServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.featurestore_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.featurestore_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.services.featurestore_service import transports from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import ( - entity_type as gca_entity_type, -) +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type from google.cloud.aiplatform_v1beta1.types import feature from google.cloud.aiplatform_v1beta1.types import feature as gca_feature from google.cloud.aiplatform_v1beta1.types import feature_monitor from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats from google.cloud.aiplatform_v1beta1.types import feature_selector from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import ( - featurestore as gca_featurestore, -) +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring from google.cloud.aiplatform_v1beta1.types import featurestore_service from google.cloud.aiplatform_v1beta1.types import io @@ -922,10 +914,9 @@ def test_featurestore_service_client_get_mtls_endpoint_and_cert_source(client_cl "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py 
index 29a6f22a77..262cd43467 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py @@ -68,16 +68,10 @@ from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import ( GenAiCacheServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import pagers +from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import transports from google.cloud.aiplatform_v1beta1.types import cached_content -from google.cloud.aiplatform_v1beta1.types import ( - cached_content as gca_cached_content, -) +from google.cloud.aiplatform_v1beta1.types import cached_content as gca_cached_content from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import gen_ai_cache_service @@ -894,10 +888,9 @@ def test_gen_ai_cache_service_client_get_mtls_endpoint_and_cert_source(client_cl "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @@ -4740,7 +4733,7 @@ def test_create_cached_content_rest_call_success(request_type): "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} }, - "google_maps": {}, + "google_maps": {"enable_widget": True}, "enterprise_web_search": { "exclude_domains": [ "exclude_domains_value1", @@ -5256,7 +5249,7 @@ def test_update_cached_content_rest_call_success(request_type): "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} }, - "google_maps": {}, + "google_maps": {"enable_widget": True}, "enterprise_web_search": { "exclude_domains": [ "exclude_domains_value1", @@ -6634,7 +6627,7 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} }, - "google_maps": {}, + "google_maps": {"enable_widget": True}, "enterprise_web_search": { "exclude_domains": [ "exclude_domains_value1", @@ -7186,7 +7179,7 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "google_search_retrieval": { "dynamic_retrieval_config": {"mode": 1, "dynamic_threshold": 0.1809} }, - "google_maps": {}, + "google_maps": {"enable_widget": True}, "enterprise_web_search": { "exclude_domains": [ "exclude_domains_value1", diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py index 52ca98111a..4506f9d553 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py @@ -72,12 +72,8 @@ from google.cloud.aiplatform_v1beta1.services.gen_ai_tuning_service import ( GenAiTuningServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.gen_ai_tuning_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.gen_ai_tuning_service import ( - transports, -) +from 
google.cloud.aiplatform_v1beta1.services.gen_ai_tuning_service import pagers +from google.cloud.aiplatform_v1beta1.services.gen_ai_tuning_service import transports from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import evaluation_service @@ -86,9 +82,7 @@ from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import tool from google.cloud.aiplatform_v1beta1.types import tuning_job -from google.cloud.aiplatform_v1beta1.types import ( - tuning_job as gca_tuning_job, -) +from google.cloud.aiplatform_v1beta1.types import tuning_job as gca_tuning_job from google.cloud.location import locations_pb2 from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore @@ -904,10 +898,9 @@ def test_gen_ai_tuning_service_client_get_mtls_endpoint_and_cert_source(client_c "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @@ -4574,6 +4567,11 @@ def test_create_tuning_job_rest_call_success(request_type): request_init = {"parent": "projects/sample1/locations/sample2"} request_init["tuning_job"] = { "base_model": "base_model_value", + "pre_tuned_model": { + "tuned_model_name": "tuned_model_name_value", + "checkpoint_id": "checkpoint_id_value", + "base_model": "base_model_value", + }, "supervised_tuning_spec": { "training_dataset_uri": "training_dataset_uri_value", "validation_dataset_uri": "validation_dataset_uri_value", @@ -6320,6 +6318,11 @@ async def test_create_tuning_job_rest_asyncio_call_success(request_type): request_init = {"parent": "projects/sample1/locations/sample2"} request_init["tuning_job"] = { "base_model": "base_model_value", + "pre_tuned_model": { + "tuned_model_name": "tuned_model_name_value", + "checkpoint_id": "checkpoint_id_value", + "base_model": "base_model_value", + }, "supervised_tuning_spec": { "training_dataset_uri": "training_dataset_uri_value", "validation_dataset_uri": "validation_dataset_uri_value", diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py index 838bf05cf1..c81a494031 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py @@ -72,18 +72,12 @@ from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import ( IndexEndpointServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import transports from google.cloud.aiplatform_v1beta1.types import accelerator_type from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import ( - index_endpoint as gca_index_endpoint, -) +from google.cloud.aiplatform_v1beta1.types import 
index_endpoint as gca_index_endpoint from google.cloud.aiplatform_v1beta1.types import index_endpoint_service from google.cloud.aiplatform_v1beta1.types import machine_resources from google.cloud.aiplatform_v1beta1.types import operation as gca_operation @@ -913,10 +907,9 @@ def test_index_endpoint_service_client_get_mtls_endpoint_and_cert_source(client_ "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py index 259e690c9c..ab105775f8 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py @@ -69,9 +69,7 @@ from google.cloud.aiplatform_v1beta1.services.index_service import ( IndexServiceAsyncClient, ) -from google.cloud.aiplatform_v1beta1.services.index_service import ( - IndexServiceClient, -) +from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceClient from google.cloud.aiplatform_v1beta1.services.index_service import pagers from google.cloud.aiplatform_v1beta1.services.index_service import transports from google.cloud.aiplatform_v1beta1.types import deployed_index_ref @@ -838,10 +836,9 @@ def test_index_service_client_get_mtls_endpoint_and_cert_source(client_class): "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py index 70fcfccb76..47ee90efd5 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -66,12 +66,8 @@ from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.job_service import ( - JobServiceAsyncClient, -) -from google.cloud.aiplatform_v1beta1.services.job_service import ( - JobServiceClient, -) +from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceClient from google.cloud.aiplatform_v1beta1.services.job_service import pagers from google.cloud.aiplatform_v1beta1.services.job_service import transports from google.cloud.aiplatform_v1beta1.types import accelerator_type @@ -81,9 +77,7 @@ ) from google.cloud.aiplatform_v1beta1.types import completion_stats from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import ( - custom_job as gca_custom_job, -) +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job from google.cloud.aiplatform_v1beta1.types import data_labeling_job from google.cloud.aiplatform_v1beta1.types import ( data_labeling_job as gca_data_labeling_job, @@ -101,13 
+95,9 @@ from google.cloud.aiplatform_v1beta1.types import job_service from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import ( - manual_batch_tuning_parameters, -) +from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import ( - model_deployment_monitoring_job, -) +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, ) @@ -874,10 +864,9 @@ def test_job_service_client_get_mtls_endpoint_and_cert_source(client_class): "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_llm_utility_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_llm_utility_service.py index 96611308bd..0fe4c70903 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_llm_utility_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_llm_utility_service.py @@ -68,9 +68,7 @@ from google.cloud.aiplatform_v1beta1.services.llm_utility_service import ( LlmUtilityServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.llm_utility_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.llm_utility_service import transports from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import llm_utility_service from google.cloud.aiplatform_v1beta1.types import tool @@ -882,10 +880,9 @@ def test_llm_utility_service_client_get_mtls_endpoint_and_cert_source(client_cla "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py index e484f70654..c27640b643 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_match_service.py @@ -65,9 +65,7 @@ from google.cloud.aiplatform_v1beta1.services.match_service import ( MatchServiceAsyncClient, ) -from google.cloud.aiplatform_v1beta1.services.match_service import ( - MatchServiceClient, -) +from google.cloud.aiplatform_v1beta1.services.match_service import MatchServiceClient from google.cloud.aiplatform_v1beta1.services.match_service import transports from google.cloud.aiplatform_v1beta1.types import index from google.cloud.aiplatform_v1beta1.types import match_service @@ -826,10 +824,9 @@ def test_match_service_client_get_mtls_endpoint_and_cert_source(client_class): "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = 
client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_memory_bank_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_memory_bank_service.py index c0e95d4476..99c5db1cb8 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_memory_bank_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_memory_bank_service.py @@ -72,12 +72,8 @@ from google.cloud.aiplatform_v1beta1.services.memory_bank_service import ( MemoryBankServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.memory_bank_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.memory_bank_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.memory_bank_service import pagers +from google.cloud.aiplatform_v1beta1.services.memory_bank_service import transports from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import memory_bank from google.cloud.aiplatform_v1beta1.types import memory_bank_service @@ -893,10 +889,9 @@ def test_memory_bank_service_client_get_mtls_endpoint_and_cert_source(client_cla "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py index b541a42b0d..2ba6e01fb1 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py @@ -73,9 +73,7 @@ MetadataServiceClient, ) from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers -from google.cloud.aiplatform_v1beta1.services.metadata_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.metadata_service import transports from google.cloud.aiplatform_v1beta1.types import artifact from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact from google.cloud.aiplatform_v1beta1.types import context @@ -86,14 +84,10 @@ from google.cloud.aiplatform_v1beta1.types import execution as gca_execution from google.cloud.aiplatform_v1beta1.types import lineage_subgraph from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import ( - metadata_schema as gca_metadata_schema, -) +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema from google.cloud.aiplatform_v1beta1.types import metadata_service from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.cloud.aiplatform_v1beta1.types import ( - metadata_store as gca_metadata_store, -) +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.location import locations_pb2 from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -896,10 +890,9 @@ def test_metadata_service_client_get_mtls_endpoint_and_cert_source(client_class) 
"google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index 3c96b72667..af4ae4e5a0 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -73,9 +73,7 @@ MigrationServiceClient, ) from google.cloud.aiplatform_v1beta1.services.migration_service import pagers -from google.cloud.aiplatform_v1beta1.services.migration_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.migration_service import transports from google.cloud.aiplatform_v1beta1.types import migratable_resource from google.cloud.aiplatform_v1beta1.types import migration_service from google.cloud.location import locations_pb2 @@ -876,10 +874,9 @@ def test_migration_service_client_get_mtls_endpoint_and_cert_source(client_class "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @@ -5406,22 +5403,19 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -5431,19 +5425,22 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + project = "scallop" + location = "abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", + "project": "clam", + "location": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py index 270f4bd823..1384cbf85b 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py @@ -72,12 +72,8 @@ from 
google.cloud.aiplatform_v1beta1.services.model_garden_service import ( ModelGardenServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.model_garden_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.model_garden_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.model_garden_service import pagers +from google.cloud.aiplatform_v1beta1.services.model_garden_service import transports from google.cloud.aiplatform_v1beta1.types import accelerator_type from google.cloud.aiplatform_v1beta1.types import env_var from google.cloud.aiplatform_v1beta1.types import io @@ -86,6 +82,7 @@ from google.cloud.aiplatform_v1beta1.types import model_garden_service from google.cloud.aiplatform_v1beta1.types import publisher_model from google.cloud.aiplatform_v1beta1.types import reservation_affinity +from google.cloud.aiplatform_v1beta1.types import service_networking from google.cloud.location import locations_pb2 from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore @@ -897,10 +894,9 @@ def test_model_garden_service_client_get_mtls_endpoint_and_cert_source(client_cl "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py index a559adbbc1..102038c567 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py @@ -72,12 +72,8 @@ from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import ( ModelMonitoringServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import pagers +from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import transports from google.cloud.aiplatform_v1beta1.types import accelerator_type from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import explanation @@ -86,9 +82,7 @@ from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types import machine_resources from google.cloud.aiplatform_v1beta1.types import model_monitor -from google.cloud.aiplatform_v1beta1.types import ( - model_monitor as gca_model_monitor, -) +from google.cloud.aiplatform_v1beta1.types import model_monitor as gca_model_monitor from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert from google.cloud.aiplatform_v1beta1.types import model_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( @@ -931,10 +925,9 @@ def test_model_monitoring_service_client_get_mtls_endpoint_and_cert_source( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) 
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py index efb5161661..f1b3774a23 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py @@ -69,9 +69,7 @@ from google.cloud.aiplatform_v1beta1.services.model_service import ( ModelServiceAsyncClient, ) -from google.cloud.aiplatform_v1beta1.services.model_service import ( - ModelServiceClient, -) +from google.cloud.aiplatform_v1beta1.services.model_service import ModelServiceClient from google.cloud.aiplatform_v1beta1.services.model_service import pagers from google.cloud.aiplatform_v1beta1.services.model_service import transports from google.cloud.aiplatform_v1beta1.types import deployed_model_ref @@ -850,10 +848,9 @@ def test_model_service_client_get_mtls_endpoint_and_cert_source(client_class): "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py index 15093005c1..eac5e7720f 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py @@ -73,9 +73,7 @@ NotebookServiceClient, ) from google.cloud.aiplatform_v1beta1.services.notebook_service import pagers -from google.cloud.aiplatform_v1beta1.services.notebook_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.notebook_service import transports from google.cloud.aiplatform_v1beta1.types import accelerator_type from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import env_var @@ -87,16 +85,12 @@ from google.cloud.aiplatform_v1beta1.types import ( notebook_execution_job as gca_notebook_execution_job, ) -from google.cloud.aiplatform_v1beta1.types import ( - notebook_idle_shutdown_config, -) +from google.cloud.aiplatform_v1beta1.types import notebook_idle_shutdown_config from google.cloud.aiplatform_v1beta1.types import notebook_runtime from google.cloud.aiplatform_v1beta1.types import ( notebook_runtime as gca_notebook_runtime, ) -from google.cloud.aiplatform_v1beta1.types import ( - notebook_runtime_template_ref, -) +from google.cloud.aiplatform_v1beta1.types import notebook_runtime_template_ref from google.cloud.aiplatform_v1beta1.types import notebook_service from google.cloud.aiplatform_v1beta1.types import notebook_software_config from google.cloud.aiplatform_v1beta1.types import operation as gca_operation @@ -904,10 +898,9 @@ def test_notebook_service_client_get_mtls_endpoint_and_cert_source(client_class) "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git 
a/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py index b553eec53f..dda8a16969 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py @@ -72,9 +72,7 @@ from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import ( PersistentResourceServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import ( - pagers, -) +from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import pagers from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import ( transports, ) @@ -927,10 +925,9 @@ def test_persistent_resource_service_client_get_mtls_endpoint_and_cert_source( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py index f6fe3570a7..07fd60a4f6 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py @@ -73,9 +73,7 @@ PipelineServiceClient, ) from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1beta1.services.pipeline_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.pipeline_service import transports from google.cloud.aiplatform_v1beta1.types import artifact from google.cloud.aiplatform_v1beta1.types import context from google.cloud.aiplatform_v1beta1.types import deployed_model_ref @@ -89,9 +87,7 @@ from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import pipeline_failure_policy from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import ( - pipeline_job as gca_pipeline_job, -) +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import pipeline_state from google.cloud.aiplatform_v1beta1.types import service_networking @@ -905,10 +901,9 @@ def test_pipeline_service_client_get_mtls_endpoint_and_cert_source(client_class) "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py index 0699520b90..ef01db8dcc 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py @@ -69,9 +69,7 @@ from google.cloud.aiplatform_v1beta1.services.prediction_service import ( PredictionServiceClient, ) -from 
google.cloud.aiplatform_v1beta1.services.prediction_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.prediction_service import transports from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import io @@ -890,10 +888,9 @@ def test_prediction_service_client_get_mtls_endpoint_and_cert_source(client_clas "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py index 8221205f47..08bcadd4f8 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_execution_service.py @@ -72,9 +72,7 @@ from google.cloud.aiplatform_v1beta1.services.reasoning_engine_execution_service import ( transports, ) -from google.cloud.aiplatform_v1beta1.types import ( - reasoning_engine_execution_service, -) +from google.cloud.aiplatform_v1beta1.types import reasoning_engine_execution_service from google.cloud.location import locations_pb2 from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore @@ -928,10 +926,9 @@ def test_reasoning_engine_execution_service_client_get_mtls_endpoint_and_cert_so "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py index 2f1bb53387..e8bffe6a66 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py @@ -72,12 +72,8 @@ from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import ( ReasoningEngineServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import pagers +from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import transports from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import env_var from google.cloud.aiplatform_v1beta1.types import operation as gca_operation @@ -916,10 +912,9 @@ def test_reasoning_engine_service_client_get_mtls_endpoint_and_cert_source( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) 
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py index f9508aa436..43a180eb6f 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py @@ -73,9 +73,7 @@ ScheduleServiceClient, ) from google.cloud.aiplatform_v1beta1.services.schedule_service import pagers -from google.cloud.aiplatform_v1beta1.services.schedule_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.schedule_service import transports from google.cloud.aiplatform_v1beta1.types import accelerator_type from google.cloud.aiplatform_v1beta1.types import artifact from google.cloud.aiplatform_v1beta1.types import context @@ -910,10 +908,9 @@ def test_schedule_service_client_get_mtls_endpoint_and_cert_source(client_class) "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py index b9b8725211..a12faa4603 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py @@ -73,9 +73,7 @@ SessionServiceClient, ) from google.cloud.aiplatform_v1beta1.services.session_service import pagers -from google.cloud.aiplatform_v1beta1.services.session_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.session_service import transports from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import session @@ -861,10 +859,9 @@ def test_session_service_client_get_mtls_endpoint_and_cert_source(client_class): "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @@ -6845,6 +6842,15 @@ def test_append_event_rest_call_success(request_type): "title": "title_value", "text": "text_value", "place_id": "place_id_value", + "place_answer_sources": { + "review_snippets": [ + { + "review_id": "review_id_value", + "google_maps_uri": "google_maps_uri_value", + "title": "title_value", + } + ] + }, }, } ], @@ -6862,6 +6868,12 @@ def test_append_event_rest_call_success(request_type): ], "retrieval_metadata": {"google_search_dynamic_retrieval_score": 0.3902}, "google_maps_widget_context_token": "google_maps_widget_context_token_value", + "source_flagging_uris": [ + { + "source_id": "source_id_value", + "flag_content_uri": "flag_content_uri_value", + } + ], }, "partial": True, "turn_complete": True, @@ -9028,6 +9040,15 @@ async def test_append_event_rest_asyncio_call_success(request_type): "title": "title_value", "text": "text_value", "place_id": "place_id_value", + 
"place_answer_sources": { + "review_snippets": [ + { + "review_id": "review_id_value", + "google_maps_uri": "google_maps_uri_value", + "title": "title_value", + } + ] + }, }, } ], @@ -9045,6 +9066,12 @@ async def test_append_event_rest_asyncio_call_success(request_type): ], "retrieval_metadata": {"google_search_dynamic_retrieval_score": 0.3902}, "google_maps_widget_context_token": "google_maps_widget_context_token_value", + "source_flagging_uris": [ + { + "source_id": "source_id_value", + "flag_content_uri": "flag_content_uri_value", + } + ], }, "partial": True, "turn_complete": True, diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py index 4a8e99c711..174e4470b3 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py @@ -72,17 +72,11 @@ from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import ( SpecialistPoolServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import transports from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import ( - specialist_pool as gca_specialist_pool, -) +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool from google.cloud.aiplatform_v1beta1.types import specialist_pool_service from google.cloud.location import locations_pb2 from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -908,10 +902,9 @@ def test_specialist_pool_service_client_get_mtls_endpoint_and_cert_source(client "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py index 86ee2b8553..e10bb99c87 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py @@ -72,27 +72,19 @@ from google.cloud.aiplatform_v1beta1.services.tensorboard_service import ( TensorboardServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import transports from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard as gca_tensorboard, -) +from google.cloud.aiplatform_v1beta1.types import 
tensorboard as gca_tensorboard from google.cloud.aiplatform_v1beta1.types import tensorboard_data from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment from google.cloud.aiplatform_v1beta1.types import ( tensorboard_experiment as gca_tensorboard_experiment, ) from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import ( - tensorboard_run as gca_tensorboard_run, -) +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run from google.cloud.aiplatform_v1beta1.types import tensorboard_service from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series from google.cloud.aiplatform_v1beta1.types import ( @@ -911,10 +903,9 @@ def test_tensorboard_service_client_get_mtls_endpoint_and_cert_source(client_cla "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py index 2a9db4ea70..2e42e3daaa 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py @@ -72,12 +72,8 @@ from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import ( VertexRagDataServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import ( - pagers, -) -from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import pagers +from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import transports from google.cloud.aiplatform_v1beta1.types import api_auth from google.cloud.aiplatform_v1beta1.types import encryption_spec from google.cloud.aiplatform_v1beta1.types import io @@ -907,10 +903,9 @@ def test_vertex_rag_data_service_client_get_mtls_endpoint_and_cert_source(client "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py index 2cb8d7bb0a..a6ecd92ea8 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py @@ -68,9 +68,7 @@ from google.cloud.aiplatform_v1beta1.services.vertex_rag_service import ( VertexRagServiceClient, ) -from google.cloud.aiplatform_v1beta1.services.vertex_rag_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.vertex_rag_service import transports from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import content as gca_content from google.cloud.aiplatform_v1beta1.types import tool @@ -876,10 +874,9 @@ def 
test_vertex_rag_service_client_get_mtls_endpoint_and_cert_source(client_clas "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py index 1f35c39b45..66a9330e69 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py @@ -69,13 +69,9 @@ from google.cloud.aiplatform_v1beta1.services.vizier_service import ( VizierServiceAsyncClient, ) -from google.cloud.aiplatform_v1beta1.services.vizier_service import ( - VizierServiceClient, -) +from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceClient from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers -from google.cloud.aiplatform_v1beta1.services.vizier_service import ( - transports, -) +from google.cloud.aiplatform_v1beta1.services.vizier_service import transports from google.cloud.aiplatform_v1beta1.types import study from google.cloud.aiplatform_v1beta1.types import study as gca_study from google.cloud.aiplatform_v1beta1.types import vizier_service @@ -852,10 +848,9 @@ def test_vizier_service_client_get_mtls_endpoint_and_cert_source(client_class): "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() + api_endpoint, cert_source = ( + client_class.get_mtls_endpoint_and_cert_source() + ) assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source
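
The single most frequent change in the hunks above is a pure formatting reflow of the mTLS-endpoint unpacking in every `*_get_mtls_endpoint_and_cert_source` test: the parenthesized, multi-line assignment target is replaced by a one-line target with the call expression parenthesized instead. Below is a minimal self-contained sketch of the before and after forms; `DummyClient` is an illustrative stand-in for the generated `*ServiceClient` classes, while the method name `get_mtls_endpoint_and_cert_source()` is taken from the diff itself.

# Stand-in for a generated service client; only the pieces needed to
# demonstrate the two unpacking styles are modeled here.
class DummyClient:
    DEFAULT_MTLS_ENDPOINT = "example.mtls.googleapis.com"

    @classmethod
    def get_mtls_endpoint_and_cert_source(cls):
        # The real classmethod also returns a 2-tuple (endpoint, cert source).
        return cls.DEFAULT_MTLS_ENDPOINT, None

# Old form (removed by the diff): the assignment *target* is parenthesized
# and split across lines.
(
    api_endpoint,
    cert_source,
) = DummyClient.get_mtls_endpoint_and_cert_source()

# New form (added by the diff): the *call* is parenthesized instead; the
# split is kept here to mirror the diff, though both forms are equivalent.
api_endpoint, cert_source = (
    DummyClient.get_mtls_endpoint_and_cert_source()
)

assert api_endpoint == DummyClient.DEFAULT_MTLS_ENDPOINT
assert cert_source is None

The collapsed `from ... import pagers` / `from ... import transports` imports that accompany this change throughout the diff are the same reflow: statements that previously wrapped now fit on one line, with no behavioral difference.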
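
The endpoint-service hunks all follow one fixture pattern: a mocked RPC returns an `Endpoint` with the new `private_model_server_enabled` field set, and the test asserts it round-trips alongside the existing `satisfies_pzs` / `satisfies_pzi` flags. A minimal sketch of that pattern, using a hypothetical `FakeEndpoint` dataclass in place of `google.cloud.aiplatform_v1beta1.types.Endpoint` (the field names mirror the diff):

from dataclasses import dataclass
from unittest import mock


@dataclass
class FakeEndpoint:
    satisfies_pzs: bool = False
    satisfies_pzi: bool = False
    private_model_server_enabled: bool = False


# The mocked stub plays the role of the gRPC/REST transport call in the
# generated tests: it returns a canned response with the new field set.
stub = mock.Mock(
    return_value=FakeEndpoint(
        satisfies_pzs=True,
        satisfies_pzi=True,
        private_model_server_enabled=True,  # field added in this diff
    )
)

response = stub()
assert response.satisfies_pzs is True
assert response.satisfies_pzi is True
assert response.private_model_server_enabled is True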
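
The `test_migration_service.py` hunk reorders two same-named `test_dataset_path` / `test_parse_dataset_path` pairs, which exercise the two resource-path formats that `MigrationServiceClient.dataset_path` supports: a legacy project-scoped form and a location-qualified form. A minimal sketch of the two formats as shown in the diff; the standalone helper names here are illustrative stand-ins, not the real overloaded classmethod:

def dataset_path_legacy(project: str, dataset: str) -> str:
    # Legacy form: no location segment.
    return f"projects/{project}/datasets/{dataset}"


def dataset_path(project: str, location: str, dataset: str) -> str:
    # Location-qualified form used by the second test pair.
    return f"projects/{project}/locations/{location}/datasets/{dataset}"


assert dataset_path_legacy("squid", "clam") == "projects/squid/datasets/clam"
assert (
    dataset_path("scallop", "abalone", "squid")
    == "projects/scallop/locations/abalone/datasets/squid"
)

Note that because both test pairs share a name within one module, only the later definition is collected by pytest; the diff merely swaps which form each position covers.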