diff --git a/java-aiplatform/README.md b/java-aiplatform/README.md
index 72ffc1b738c7..3de6f858c3e9 100644
--- a/java-aiplatform/README.md
+++ b/java-aiplatform/README.md
@@ -19,20 +19,20 @@ If you are using Maven, add this to your pom.xml file:
Updatable fields: *
* `labels` * `online_serving_config.fixed_node_count` *
- * `online_serving_config.scaling`
+ * `online_serving_config.scaling` * `online_storage_ttl_days` (available in Preview)
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture Sample code:
+ *
+ * Sample code:
+ *
+ * Sample code:
+ *
+ * Sample code:
+ *
+ * This value should be 1-128 characters, and valid characters are /[a-z][0-9]-/.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
@@ -978,7 +1099,7 @@ public final TensorboardExperiment createTensorboardExperiment(
* `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
* @param tensorboardExperiment The TensorboardExperiment to create.
* @param tensorboardExperimentId Required. The ID to use for the Tensorboard experiment, which
- * will become the final component of the Tensorboard experiment's resource name.
+ * becomes the final component of the Tensorboard experiment's resource name.
* This value should be 1-128 characters, and valid characters are /[a-z][0-9]-/.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
@@ -1212,9 +1333,9 @@ public final TensorboardExperiment getTensorboardExperiment(
* `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
* @param updateMask Required. Field mask is used to specify the fields to be overwritten in the
* TensorboardExperiment resource by the update. The fields specified in the update_mask are
- * relative to the resource, not the full request. A field will be overwritten if it is in the
- * mask. If the user does not provide a mask then all fields will be overwritten if new values
- * are specified.
+ * relative to the resource, not the full request. A field is overwritten if it's in the mask.
+ * If the user does not provide a mask then all fields are overwritten if new values are
+ * specified.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final TensorboardExperiment updateTensorboardExperiment(
@@ -1658,7 +1779,7 @@ public final OperationFuture This value should be 1-128 characters, and valid characters are /[a-z][0-9]-/.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
@@ -1701,7 +1822,7 @@ public final TensorboardRun createTensorboardRun(
* TensorboardRun in. Format:
* `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
* @param tensorboardRun Required. The TensorboardRun to create.
- * @param tensorboardRunId Required. The ID to use for the Tensorboard run, which will become the
+ * @param tensorboardRunId Required. The ID to use for the Tensorboard run, which becomes the
* final component of the Tensorboard run's resource name.
* This value should be 1-128 characters, and valid characters are /[a-z][0-9]-/.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
@@ -2078,9 +2199,8 @@ public final UnaryCallable Sample code:
*
@@ -3485,8 +3605,8 @@ public final BatchReadTensorboardTimeSeriesDataResponse batchReadTensorboardTime
/**
* Reads multiple TensorboardTimeSeries' data. The data point number limit is 1000 for scalars,
* 100 for tensors and blob references. If the number of data points stored is less than the
- * limit, all data will be returned. Otherwise, that limit number of data points will be randomly
- * selected from this time series and returned.
+ * limit, all data is returned. Otherwise, that number of data points is randomly selected
+ * from this time series and returned.
*
* Sample code:
*
@@ -3523,8 +3643,8 @@ public final BatchReadTensorboardTimeSeriesDataResponse batchReadTensorboardTime
/**
* Reads multiple TensorboardTimeSeries' data. The data point number limit is 1000 for scalars,
* 100 for tensors and blob references. If the number of data points stored is less than the
- * limit, all data will be returned. Otherwise, that limit number of data points will be randomly
- * selected from this time series and returned.
+ * limit, all data is returned. Otherwise, that number of data points is randomly selected
+ * from this time series and returned.
*
* Sample code:
*
@@ -3558,8 +3678,8 @@ public final BatchReadTensorboardTimeSeriesDataResponse batchReadTensorboardTime
/**
* Reads multiple TensorboardTimeSeries' data. The data point number limit is 1000 for scalars,
* 100 for tensors and blob references. If the number of data points stored is less than the
- * limit, all data will be returned. Otherwise, that limit number of data points will be randomly
- * selected from this time series and returned.
+ * limit, all data is returned. Otherwise, that number of data points is randomly selected
+ * from this time series and returned.
*
* Sample code:
*
@@ -3592,9 +3712,9 @@ public final BatchReadTensorboardTimeSeriesDataResponse batchReadTensorboardTime
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Reads a TensorboardTimeSeries' data. By default, if the number of data points stored is less
- * than 1000, all data will be returned. Otherwise, 1000 data points will be randomly selected
- * from this time series and returned. This value can be changed by changing max_data_points,
- * which can't be greater than 10k.
+ * than 1000, all data is returned. Otherwise, 1000 data points are randomly selected from this
+ * time series and returned. This value can be changed by changing max_data_points, which can't be
+ * greater than 10k.
*
* Sample code:
*
@@ -3631,9 +3751,9 @@ public final ReadTensorboardTimeSeriesDataResponse readTensorboardTimeSeriesData
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Reads a TensorboardTimeSeries' data. By default, if the number of data points stored is less
- * than 1000, all data will be returned. Otherwise, 1000 data points will be randomly selected
- * from this time series and returned. This value can be changed by changing max_data_points,
- * which can't be greater than 10k.
+ * than 1000, all data is returned. Otherwise, 1000 data points are randomly selected from this
+ * time series and returned. This value can be changed by changing max_data_points, which can't be
+ * greater than 10k.
*
* Sample code:
*
@@ -3675,9 +3795,9 @@ public final ReadTensorboardTimeSeriesDataResponse readTensorboardTimeSeriesData
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Reads a TensorboardTimeSeries' data. By default, if the number of data points stored is less
- * than 1000, all data will be returned. Otherwise, 1000 data points will be randomly selected
- * from this time series and returned. This value can be changed by changing max_data_points,
- * which can't be greater than 10k.
+ * than 1000, all data is returned. Otherwise, 1000 data points are randomly selected from this
+ * time series and returned. This value can be changed by changing max_data_points, which can't be
+ * greater than 10k.
*
* Sample code:
*
@@ -3718,9 +3838,9 @@ public final ReadTensorboardTimeSeriesDataResponse readTensorboardTimeSeriesData
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Reads a TensorboardTimeSeries' data. By default, if the number of data points stored is less
- * than 1000, all data will be returned. Otherwise, 1000 data points will be randomly selected
- * from this time series and returned. This value can be changed by changing max_data_points,
- * which can't be greater than 10k.
+ * than 1000, all data is returned. Otherwise, 1000 data points are randomly selected from this
+ * time series and returned. This value can be changed by changing max_data_points, which can't be
+ * greater than 10k.
*
* Sample code:
*
@@ -3802,7 +3922,7 @@ public final ReadTensorboardTimeSeriesDataResponse readTensorboardTimeSeriesData
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points of multiple TensorboardTimeSeries in multiple TensorboardRun's.
- * If any data fail to be ingested, an error will be returned.
+ * If any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -3844,7 +3964,7 @@ public final WriteTensorboardExperimentDataResponse writeTensorboardExperimentDa
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points of multiple TensorboardTimeSeries in multiple TensorboardRun's.
- * If any data fail to be ingested, an error will be returned.
+ * If any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -3885,7 +4005,7 @@ public final WriteTensorboardExperimentDataResponse writeTensorboardExperimentDa
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points of multiple TensorboardTimeSeries in multiple TensorboardRun's.
- * If any data fail to be ingested, an error will be returned.
+ * If any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -3920,7 +4040,7 @@ public final WriteTensorboardExperimentDataResponse writeTensorboardExperimentDa
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points of multiple TensorboardTimeSeries in multiple TensorboardRun's.
- * If any data fail to be ingested, an error will be returned.
+ * If any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -3955,7 +4075,7 @@ public final WriteTensorboardExperimentDataResponse writeTensorboardExperimentDa
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points into multiple TensorboardTimeSeries under a TensorboardRun. If
- * any data fail to be ingested, an error will be returned.
+ * any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -3996,7 +4116,7 @@ public final WriteTensorboardRunDataResponse writeTensorboardRunData(
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points into multiple TensorboardTimeSeries under a TensorboardRun. If
- * any data fail to be ingested, an error will be returned.
+ * any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -4037,7 +4157,7 @@ public final WriteTensorboardRunDataResponse writeTensorboardRunData(
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points into multiple TensorboardTimeSeries under a TensorboardRun. If
- * any data fail to be ingested, an error will be returned.
+ * any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -4072,7 +4192,7 @@ public final WriteTensorboardRunDataResponse writeTensorboardRunData(
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points into multiple TensorboardTimeSeries under a TensorboardRun. If
- * any data fail to be ingested, an error will be returned.
+ * any data fail to be ingested, an error is returned.
*
* Sample code:
*
diff --git a/java-aiplatform/google-cloud-aiplatform/src/main/java/com/google/cloud/aiplatform/v1/TensorboardServiceSettings.java b/java-aiplatform/google-cloud-aiplatform/src/main/java/com/google/cloud/aiplatform/v1/TensorboardServiceSettings.java
index 2cb6f728c27d..f172094f26bc 100644
--- a/java-aiplatform/google-cloud-aiplatform/src/main/java/com/google/cloud/aiplatform/v1/TensorboardServiceSettings.java
+++ b/java-aiplatform/google-cloud-aiplatform/src/main/java/com/google/cloud/aiplatform/v1/TensorboardServiceSettings.java
@@ -111,6 +111,12 @@ public UnaryCallSettings Updatable fields:
* * `labels` * `online_serving_config.fixed_node_count` *
- * `online_serving_config.scaling` * `online_storage_ttl_days`
+ * `online_serving_config.scaling` * `online_storage_ttl_days` (available in Preview)
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final OperationFuture Sample code:
+ *
+ * Sample code:
+ *
+ * Sample code:
+ *
+ * Sample code:
+ *
+ * This value should be 1-128 characters, and valid characters are /[a-z][0-9]-/.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
@@ -980,7 +1101,7 @@ public final TensorboardExperiment createTensorboardExperiment(
* `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
* @param tensorboardExperiment The TensorboardExperiment to create.
* @param tensorboardExperimentId Required. The ID to use for the Tensorboard experiment, which
- * will become the final component of the Tensorboard experiment's resource name.
+ * becomes the final component of the Tensorboard experiment's resource name.
* This value should be 1-128 characters, and valid characters are /[a-z][0-9]-/.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
@@ -1214,9 +1335,9 @@ public final TensorboardExperiment getTensorboardExperiment(
* `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
* @param updateMask Required. Field mask is used to specify the fields to be overwritten in the
* TensorboardExperiment resource by the update. The fields specified in the update_mask are
- * relative to the resource, not the full request. A field will be overwritten if it is in the
- * mask. If the user does not provide a mask then all fields will be overwritten if new values
- * are specified.
+ * relative to the resource, not the full request. A field is overwritten if it's in the mask.
+ * If the user does not provide a mask then all fields are overwritten if new values are
+ * specified.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final TensorboardExperiment updateTensorboardExperiment(
@@ -1660,7 +1781,7 @@ public final OperationFuture This value should be 1-128 characters, and valid characters are /[a-z][0-9]-/.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
@@ -1703,7 +1824,7 @@ public final TensorboardRun createTensorboardRun(
* TensorboardRun in. Format:
* `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`
* @param tensorboardRun Required. The TensorboardRun to create.
- * @param tensorboardRunId Required. The ID to use for the Tensorboard run, which will become the
+ * @param tensorboardRunId Required. The ID to use for the Tensorboard run, which becomes the
* final component of the Tensorboard run's resource name.
* This value should be 1-128 characters, and valid characters are /[a-z][0-9]-/.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
@@ -2080,9 +2201,8 @@ public final UnaryCallable Sample code:
*
@@ -3487,8 +3607,8 @@ public final BatchReadTensorboardTimeSeriesDataResponse batchReadTensorboardTime
/**
* Reads multiple TensorboardTimeSeries' data. The data point number limit is 1000 for scalars,
* 100 for tensors and blob references. If the number of data points stored is less than the
- * limit, all data will be returned. Otherwise, that limit number of data points will be randomly
- * selected from this time series and returned.
+ * limit, all data is returned. Otherwise, that number of data points is randomly selected
+ * from this time series and returned.
*
* Sample code:
*
@@ -3525,8 +3645,8 @@ public final BatchReadTensorboardTimeSeriesDataResponse batchReadTensorboardTime
/**
* Reads multiple TensorboardTimeSeries' data. The data point number limit is 1000 for scalars,
* 100 for tensors and blob references. If the number of data points stored is less than the
- * limit, all data will be returned. Otherwise, that limit number of data points will be randomly
- * selected from this time series and returned.
+ * limit, all data is returned. Otherwise, that number of data points is randomly selected
+ * from this time series and returned.
*
* Sample code:
*
@@ -3560,8 +3680,8 @@ public final BatchReadTensorboardTimeSeriesDataResponse batchReadTensorboardTime
/**
* Reads multiple TensorboardTimeSeries' data. The data point number limit is 1000 for scalars,
* 100 for tensors and blob references. If the number of data points stored is less than the
- * limit, all data will be returned. Otherwise, that limit number of data points will be randomly
- * selected from this time series and returned.
+ * limit, all data is returned. Otherwise, that number of data points is randomly selected
+ * from this time series and returned.
*
* Sample code:
*
@@ -3594,9 +3714,9 @@ public final BatchReadTensorboardTimeSeriesDataResponse batchReadTensorboardTime
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Reads a TensorboardTimeSeries' data. By default, if the number of data points stored is less
- * than 1000, all data will be returned. Otherwise, 1000 data points will be randomly selected
- * from this time series and returned. This value can be changed by changing max_data_points,
- * which can't be greater than 10k.
+ * than 1000, all data is returned. Otherwise, 1000 data points are randomly selected from this
+ * time series and returned. This value can be changed by changing max_data_points, which can't be
+ * greater than 10k.
*
* Sample code:
*
@@ -3633,9 +3753,9 @@ public final ReadTensorboardTimeSeriesDataResponse readTensorboardTimeSeriesData
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Reads a TensorboardTimeSeries' data. By default, if the number of data points stored is less
- * than 1000, all data will be returned. Otherwise, 1000 data points will be randomly selected
- * from this time series and returned. This value can be changed by changing max_data_points,
- * which can't be greater than 10k.
+ * than 1000, all data is returned. Otherwise, 1000 data points are randomly selected from this
+ * time series and returned. This value can be changed by changing max_data_points, which can't be
+ * greater than 10k.
*
* Sample code:
*
@@ -3677,9 +3797,9 @@ public final ReadTensorboardTimeSeriesDataResponse readTensorboardTimeSeriesData
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Reads a TensorboardTimeSeries' data. By default, if the number of data points stored is less
- * than 1000, all data will be returned. Otherwise, 1000 data points will be randomly selected
- * from this time series and returned. This value can be changed by changing max_data_points,
- * which can't be greater than 10k.
+ * than 1000, all data is returned. Otherwise, 1000 data points are randomly selected from this
+ * time series and returned. This value can be changed by changing max_data_points, which can't be
+ * greater than 10k.
*
* Sample code:
*
@@ -3720,9 +3840,9 @@ public final ReadTensorboardTimeSeriesDataResponse readTensorboardTimeSeriesData
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Reads a TensorboardTimeSeries' data. By default, if the number of data points stored is less
- * than 1000, all data will be returned. Otherwise, 1000 data points will be randomly selected
- * from this time series and returned. This value can be changed by changing max_data_points,
- * which can't be greater than 10k.
+ * than 1000, all data is returned. Otherwise, 1000 data points are randomly selected from this
+ * time series and returned. This value can be changed by changing max_data_points, which can't be
+ * greater than 10k.
*
* Sample code:
*
@@ -3804,7 +3924,7 @@ public final ReadTensorboardTimeSeriesDataResponse readTensorboardTimeSeriesData
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points of multiple TensorboardTimeSeries in multiple TensorboardRun's.
- * If any data fail to be ingested, an error will be returned.
+ * If any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -3846,7 +3966,7 @@ public final WriteTensorboardExperimentDataResponse writeTensorboardExperimentDa
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points of multiple TensorboardTimeSeries in multiple TensorboardRun's.
- * If any data fail to be ingested, an error will be returned.
+ * If any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -3887,7 +4007,7 @@ public final WriteTensorboardExperimentDataResponse writeTensorboardExperimentDa
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points of multiple TensorboardTimeSeries in multiple TensorboardRun's.
- * If any data fail to be ingested, an error will be returned.
+ * If any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -3922,7 +4042,7 @@ public final WriteTensorboardExperimentDataResponse writeTensorboardExperimentDa
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points of multiple TensorboardTimeSeries in multiple TensorboardRun's.
- * If any data fail to be ingested, an error will be returned.
+ * If any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -3957,7 +4077,7 @@ public final WriteTensorboardExperimentDataResponse writeTensorboardExperimentDa
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points into multiple TensorboardTimeSeries under a TensorboardRun. If
- * any data fail to be ingested, an error will be returned.
+ * any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -3998,7 +4118,7 @@ public final WriteTensorboardRunDataResponse writeTensorboardRunData(
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points into multiple TensorboardTimeSeries under a TensorboardRun. If
- * any data fail to be ingested, an error will be returned.
+ * any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -4039,7 +4159,7 @@ public final WriteTensorboardRunDataResponse writeTensorboardRunData(
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points into multiple TensorboardTimeSeries under a TensorboardRun. If
- * any data fail to be ingested, an error will be returned.
+ * any data fail to be ingested, an error is returned.
*
* Sample code:
*
@@ -4074,7 +4194,7 @@ public final WriteTensorboardRunDataResponse writeTensorboardRunData(
// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Write time series data points into multiple TensorboardTimeSeries under a TensorboardRun. If
- * any data fail to be ingested, an error will be returned.
+ * any data fail to be ingested, an error is returned.
*
* Sample code:
*
diff --git a/java-aiplatform/google-cloud-aiplatform/src/main/java/com/google/cloud/aiplatform/v1beta1/TensorboardServiceSettings.java b/java-aiplatform/google-cloud-aiplatform/src/main/java/com/google/cloud/aiplatform/v1beta1/TensorboardServiceSettings.java
index 7524971a31b7..b44845eca218 100644
--- a/java-aiplatform/google-cloud-aiplatform/src/main/java/com/google/cloud/aiplatform/v1beta1/TensorboardServiceSettings.java
+++ b/java-aiplatform/google-cloud-aiplatform/src/main/java/com/google/cloud/aiplatform/v1beta1/TensorboardServiceSettings.java
@@ -112,6 +112,12 @@ public UnaryCallSettings{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (TensorboardServiceClient tensorboardServiceClient = TensorboardServiceClient.create()) {
+ * TensorboardName tensorboard = TensorboardName.of("[PROJECT]", "[LOCATION]", "[TENSORBOARD]");
+ * ReadTensorboardUsageResponse response =
+ * tensorboardServiceClient.readTensorboardUsage(tensorboard);
+ * }
+ * }
+ *
+ * @param tensorboard Required. The name of the Tensorboard resource. Format:
+ * `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final ReadTensorboardUsageResponse readTensorboardUsage(TensorboardName tensorboard) {
+ ReadTensorboardUsageRequest request =
+ ReadTensorboardUsageRequest.newBuilder()
+ .setTensorboard(tensorboard == null ? null : tensorboard.toString())
+ .build();
+ return readTensorboardUsage(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD.
+ /**
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ * {@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (TensorboardServiceClient tensorboardServiceClient = TensorboardServiceClient.create()) {
+ * String tensorboard =
+ * TensorboardName.of("[PROJECT]", "[LOCATION]", "[TENSORBOARD]").toString();
+ * ReadTensorboardUsageResponse response =
+ * tensorboardServiceClient.readTensorboardUsage(tensorboard);
+ * }
+ * }
+ *
+ * @param tensorboard Required. The name of the Tensorboard resource. Format:
+ * `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final ReadTensorboardUsageResponse readTensorboardUsage(String tensorboard) {
+ ReadTensorboardUsageRequest request =
+ ReadTensorboardUsageRequest.newBuilder().setTensorboard(tensorboard).build();
+ return readTensorboardUsage(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD.
+ /**
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ * {@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (TensorboardServiceClient tensorboardServiceClient = TensorboardServiceClient.create()) {
+ * ReadTensorboardUsageRequest request =
+ * ReadTensorboardUsageRequest.newBuilder()
+ * .setTensorboard(
+ * TensorboardName.of("[PROJECT]", "[LOCATION]", "[TENSORBOARD]").toString())
+ * .build();
+ * ReadTensorboardUsageResponse response =
+ * tensorboardServiceClient.readTensorboardUsage(request);
+ * }
+ * }
+ *
+ * @param request The request object containing all of the parameters for the API call.
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final ReadTensorboardUsageResponse readTensorboardUsage(
+ ReadTensorboardUsageRequest request) {
+ return readTensorboardUsageCallable().call(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD.
+ /**
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ * {@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (TensorboardServiceClient tensorboardServiceClient = TensorboardServiceClient.create()) {
+ * ReadTensorboardUsageRequest request =
+ * ReadTensorboardUsageRequest.newBuilder()
+ * .setTensorboard(
+ * TensorboardName.of("[PROJECT]", "[LOCATION]", "[TENSORBOARD]").toString())
+ * .build();
+ * ApiFuture
+ */
+ public final UnaryCallable{@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (TensorboardServiceClient tensorboardServiceClient = TensorboardServiceClient.create()) {
+ * TensorboardName tensorboard = TensorboardName.of("[PROJECT]", "[LOCATION]", "[TENSORBOARD]");
+ * ReadTensorboardUsageResponse response =
+ * tensorboardServiceClient.readTensorboardUsage(tensorboard);
+ * }
+ * }
+ *
+ * @param tensorboard Required. The name of the Tensorboard resource. Format:
+ * `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final ReadTensorboardUsageResponse readTensorboardUsage(TensorboardName tensorboard) {
+ ReadTensorboardUsageRequest request =
+ ReadTensorboardUsageRequest.newBuilder()
+ .setTensorboard(tensorboard == null ? null : tensorboard.toString())
+ .build();
+ return readTensorboardUsage(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD.
+ /**
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ * {@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (TensorboardServiceClient tensorboardServiceClient = TensorboardServiceClient.create()) {
+ * String tensorboard =
+ * TensorboardName.of("[PROJECT]", "[LOCATION]", "[TENSORBOARD]").toString();
+ * ReadTensorboardUsageResponse response =
+ * tensorboardServiceClient.readTensorboardUsage(tensorboard);
+ * }
+ * }
+ *
+ * @param tensorboard Required. The name of the Tensorboard resource. Format:
+ * `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final ReadTensorboardUsageResponse readTensorboardUsage(String tensorboard) {
+ ReadTensorboardUsageRequest request =
+ ReadTensorboardUsageRequest.newBuilder().setTensorboard(tensorboard).build();
+ return readTensorboardUsage(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD.
+ /**
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ * {@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (TensorboardServiceClient tensorboardServiceClient = TensorboardServiceClient.create()) {
+ * ReadTensorboardUsageRequest request =
+ * ReadTensorboardUsageRequest.newBuilder()
+ * .setTensorboard(
+ * TensorboardName.of("[PROJECT]", "[LOCATION]", "[TENSORBOARD]").toString())
+ * .build();
+ * ReadTensorboardUsageResponse response =
+ * tensorboardServiceClient.readTensorboardUsage(request);
+ * }
+ * }
+ *
+ * @param request The request object containing all of the parameters for the API call.
+ * @throws com.google.api.gax.rpc.ApiException if the remote call fails
+ */
+ public final ReadTensorboardUsageResponse readTensorboardUsage(
+ ReadTensorboardUsageRequest request) {
+ return readTensorboardUsageCallable().call(request);
+ }
+
+ // AUTO-GENERATED DOCUMENTATION AND METHOD.
+ /**
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ * {@code
+ * // This snippet has been automatically generated and should be regarded as a code template only.
+ * // It will require modifications to work:
+ * // - It may require correct/in-range values for request initialization.
+ * // - It may require specifying regional endpoints when creating the service client as shown in
+ * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+ * try (TensorboardServiceClient tensorboardServiceClient = TensorboardServiceClient.create()) {
+ * ReadTensorboardUsageRequest request =
+ * ReadTensorboardUsageRequest.newBuilder()
+ * .setTensorboard(
+ * TensorboardName.of("[PROJECT]", "[LOCATION]", "[TENSORBOARD]").toString())
+ * .build();
+ * ApiFuture
+ */
+ public final UnaryCallable
* Deletes an Index.
* An Index can only be deleted when all its
- * [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] had been undeployed.
+ * [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] had
+ * been undeployed.
*
*/
public void deleteIndex(
@@ -633,7 +634,8 @@ public void updateIndex(
*
* Deletes an Index.
* An Index can only be deleted when all its
- * [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] had been undeployed.
+ * [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] had
+ * been undeployed.
*
*/
public void deleteIndex(
@@ -757,7 +759,8 @@ public com.google.longrunning.Operation updateIndex(
*
* Deletes an Index.
* An Index can only be deleted when all its
- * [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] had been undeployed.
+ * [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] had
+ * been undeployed.
*
*/
public com.google.longrunning.Operation deleteIndex(
@@ -871,7 +874,8 @@ protected IndexServiceFutureStub build(
*
* Deletes an Index.
* An Index can only be deleted when all its
- * [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] had been undeployed.
+ * [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] had
+ * been undeployed.
*
*/
public com.google.common.util.concurrent.ListenableFuture
* Pauses a ModelDeploymentMonitoringJob. If the job is running, the server
* makes a best effort to cancel the job. Will mark
- * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] to 'PAUSED'.
+ * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state]
+ * to 'PAUSED'.
*
*/
public void pauseModelDeploymentMonitoringJob(
@@ -2207,12 +2217,15 @@ public void deleteCustomJob(
* Cancels a CustomJob.
* Starts asynchronous cancellation on the CustomJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the CustomJob is not deleted; instead it becomes a job with
- * a [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- * corresponding to `Code.CANCELLED`, and [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to
+ * a [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] value with
+ * a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
+ * `Code.CANCELLED`, and
+ * [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to
* `CANCELLED`.
*
*/
@@ -2383,13 +2396,17 @@ public void deleteHyperparameterTuningJob(
* Cancels a HyperparameterTuningJob.
* Starts asynchronous cancellation on the HyperparameterTuningJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the HyperparameterTuningJob is not deleted; instead it becomes a job with
- * a [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code]
- * of 1, corresponding to `Code.CANCELLED`, and
- * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] is set to `CANCELLED`.
+ * a
+ * [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error]
+ * value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ * corresponding to `Code.CANCELLED`, and
+ * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
+ * is set to `CANCELLED`.
*
*/
public void cancelHyperparameterTuningJob(
@@ -2477,12 +2494,14 @@ public void deleteBatchPredictionJob(
* Cancels a BatchPredictionJob.
* Starts asynchronous cancellation on the BatchPredictionJob. The server
* makes the best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On a successful cancellation,
* the BatchPredictionJob is not deleted;instead its
- * [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] is set to `CANCELLED`. Any files already
- * outputted by the job are not deleted.
+ * [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
+ * is set to `CANCELLED`. Any files already outputted by the job are not
+ * deleted.
*
*/
public void cancelBatchPredictionJob(
@@ -2605,7 +2624,8 @@ public void deleteModelDeploymentMonitoringJob(
*
* Pauses a ModelDeploymentMonitoringJob. If the job is running, the server
* makes a best effort to cancel the job. Will mark
- * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] to 'PAUSED'.
+ * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state]
+ * to 'PAUSED'.
*
*/
public void pauseModelDeploymentMonitoringJob(
@@ -2715,12 +2735,15 @@ public com.google.longrunning.Operation deleteCustomJob(
* Cancels a CustomJob.
* Starts asynchronous cancellation on the CustomJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the CustomJob is not deleted; instead it becomes a job with
- * a [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- * corresponding to `Code.CANCELLED`, and [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to
+ * a [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] value with
+ * a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
+ * `Code.CANCELLED`, and
+ * [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to
* `CANCELLED`.
*
*/
@@ -2855,13 +2878,17 @@ public com.google.longrunning.Operation deleteHyperparameterTuningJob(
* Cancels a HyperparameterTuningJob.
* Starts asynchronous cancellation on the HyperparameterTuningJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the HyperparameterTuningJob is not deleted; instead it becomes a job with
- * a [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code]
- * of 1, corresponding to `Code.CANCELLED`, and
- * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] is set to `CANCELLED`.
+ * a
+ * [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error]
+ * value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ * corresponding to `Code.CANCELLED`, and
+ * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
+ * is set to `CANCELLED`.
*
*/
public com.google.protobuf.Empty cancelHyperparameterTuningJob(
@@ -2931,12 +2958,14 @@ public com.google.longrunning.Operation deleteBatchPredictionJob(
* Cancels a BatchPredictionJob.
* Starts asynchronous cancellation on the BatchPredictionJob. The server
* makes the best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On a successful cancellation,
* the BatchPredictionJob is not deleted;instead its
- * [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] is set to `CANCELLED`. Any files already
- * outputted by the job are not deleted.
+ * [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
+ * is set to `CANCELLED`. Any files already outputted by the job are not
+ * deleted.
*
*/
public com.google.protobuf.Empty cancelBatchPredictionJob(
@@ -3038,7 +3067,8 @@ public com.google.longrunning.Operation deleteModelDeploymentMonitoringJob(
*
* Pauses a ModelDeploymentMonitoringJob. If the job is running, the server
* makes a best effort to cancel the job. Will mark
- * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] to 'PAUSED'.
+ * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state]
+ * to 'PAUSED'.
*
*/
public com.google.protobuf.Empty pauseModelDeploymentMonitoringJob(
@@ -3144,12 +3174,15 @@ protected JobServiceFutureStub build(io.grpc.Channel channel, io.grpc.CallOption
* Cancels a CustomJob.
* Starts asynchronous cancellation on the CustomJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the CustomJob is not deleted; instead it becomes a job with
- * a [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- * corresponding to `Code.CANCELLED`, and [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to
+ * a [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] value with
+ * a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
+ * `Code.CANCELLED`, and
+ * [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to
* `CANCELLED`.
*
*/
@@ -3295,13 +3328,17 @@ protected JobServiceFutureStub build(io.grpc.Channel channel, io.grpc.CallOption
* Cancels a HyperparameterTuningJob.
* Starts asynchronous cancellation on the HyperparameterTuningJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the HyperparameterTuningJob is not deleted; instead it becomes a job with
- * a [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code]
- * of 1, corresponding to `Code.CANCELLED`, and
- * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] is set to `CANCELLED`.
+ * a
+ * [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error]
+ * value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ * corresponding to `Code.CANCELLED`, and
+ * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
+ * is set to `CANCELLED`.
*
*/
public com.google.common.util.concurrent.ListenableFuture
* Pauses a ModelDeploymentMonitoringJob. If the job is running, the server
* makes a best effort to cancel the job. Will mark
- * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] to 'PAUSED'.
+ * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state]
+ * to 'PAUSED'.
*
*/
public com.google.common.util.concurrent.ListenableFuture
* Deletes a Model.
- * A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a
- * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the model in its
- * [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] field.
+ * A model cannot be deleted if any
+ * [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a
+ * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the
+ * model in its
+ * [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * field.
*
*/
public void deleteModel(
@@ -873,7 +876,8 @@ public void deleteModel(
* Deletes a Model version.
* Model version can only be deleted if there are no [DeployedModels][]
* created from it. Deleting the only version in the Model is not allowed. Use
- * [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] for deleting the Model instead.
+ * [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] for
+ * deleting the Model instead.
*
*/
public void deleteModelVersion(
@@ -903,7 +907,8 @@ public void mergeVersionAliases(
*
* Exports a trained, exportable Model to a location specified by the
* user. A Model is considered to be exportable if it has at least one
- * [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+ * [supported export
+ * format][google.cloud.aiplatform.v1.Model.supported_export_formats].
*
*/
public void exportModel(
@@ -1211,9 +1216,12 @@ public void updateModel(
*
*
* Deletes a Model.
- * A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a
- * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the model in its
- * [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] field.
+ * A model cannot be deleted if any
+ * [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a
+ * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the
+ * model in its
+ * [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * field.
*
*/
public void deleteModel(
@@ -1232,7 +1240,8 @@ public void deleteModel(
* Deletes a Model version.
* Model version can only be deleted if there are no [DeployedModels][]
* created from it. Deleting the only version in the Model is not allowed. Use
- * [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] for deleting the Model instead.
+ * [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] for
+ * deleting the Model instead.
*
*/
public void deleteModelVersion(
@@ -1266,7 +1275,8 @@ public void mergeVersionAliases(
*
* Exports a trained, exportable Model to a location specified by the
* user. A Model is considered to be exportable if it has at least one
- * [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+ * [supported export
+ * format][google.cloud.aiplatform.v1.Model.supported_export_formats].
*
*/
public void exportModel(
@@ -1472,9 +1482,12 @@ public com.google.cloud.aiplatform.v1.Model updateModel(
*
*
* Deletes a Model.
- * A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a
- * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the model in its
- * [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] field.
+ * A model cannot be deleted if any
+ * [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a
+ * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the
+ * model in its
+ * [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * field.
*
*/
public com.google.longrunning.Operation deleteModel(
@@ -1490,7 +1503,8 @@ public com.google.longrunning.Operation deleteModel(
* Deletes a Model version.
* Model version can only be deleted if there are no [DeployedModels][]
* created from it. Deleting the only version in the Model is not allowed. Use
- * [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] for deleting the Model instead.
+ * [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] for
+ * deleting the Model instead.
*
*/
public com.google.longrunning.Operation deleteModelVersion(
@@ -1518,7 +1532,8 @@ public com.google.cloud.aiplatform.v1.Model mergeVersionAliases(
*
* Exports a trained, exportable Model to a location specified by the
* user. A Model is considered to be exportable if it has at least one
- * [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+ * [supported export
+ * format][google.cloud.aiplatform.v1.Model.supported_export_formats].
*
*/
public com.google.longrunning.Operation exportModel(
@@ -1699,9 +1714,12 @@ protected ModelServiceFutureStub build(
*
*
* Deletes a Model.
- * A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a
- * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the model in its
- * [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] field.
+ * A model cannot be deleted if any
+ * [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a
+ * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the
+ * model in its
+ * [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * field.
*
*/
public com.google.common.util.concurrent.ListenableFuture
* Exports a trained, exportable Model to a location specified by the
* user. A Model is considered to be exportable if it has at least one
- * [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+ * [supported export
+ * format][google.cloud.aiplatform.v1.Model.supported_export_formats].
*
*/
public com.google.common.util.concurrent.ListenableFuture
* Perform an online prediction with an arbitrary HTTP payload.
* The response includes the following HTTP headers:
- * * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served this
+ * * `X-Vertex-AI-Endpoint-Id`: ID of the
+ * [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served this
+ * prediction.
+ * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
+ * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that served this
* prediction.
- * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
- * that served this prediction.
*
*/
public void rawPredict(
@@ -252,10 +254,12 @@ public void rawPredict(
*
*
* Perform an online explanation.
- * If [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] is specified,
- * the corresponding DeployModel must have
+ * If
+ * [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
+ * is specified, the corresponding DeployModel must have
* [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec]
- * populated. If [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
+ * populated. If
+ * [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
* is not specified, all DeployedModels must have
* [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec]
* populated. Only deployed AutoML tabular Models have
@@ -334,10 +338,12 @@ public void predict(
*
*/
public void cancelBatchPredictionJob(
@@ -1929,7 +1938,8 @@ public void deleteModelDeploymentMonitoringJob(
*
* Perform an online prediction with an arbitrary HTTP payload.
* The response includes the following HTTP headers:
- * * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served this
+ * * `X-Vertex-AI-Endpoint-Id`: ID of the
+ * [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served this
+ * prediction.
+ * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
+ * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that served this
* prediction.
- * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
- * that served this prediction.
*
*/
public void rawPredict(
@@ -352,10 +358,12 @@ public void rawPredict(
*
*
* Perform an online explanation.
- * If [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] is specified,
- * the corresponding DeployModel must have
+ * If
+ * [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
+ * is specified, the corresponding DeployModel must have
* [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec]
- * populated. If [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
+ * populated. If
+ * [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
* is not specified, all DeployedModels must have
* [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec]
* populated. Only deployed AutoML tabular Models have
@@ -410,10 +418,12 @@ public com.google.cloud.aiplatform.v1.PredictResponse predict(
*
*/
public void cancelHyperparameterTuningJob(
@@ -1813,12 +1820,14 @@ public void deleteBatchPredictionJob(
* Cancels a BatchPredictionJob.
* Starts asynchronous cancellation on the BatchPredictionJob. The server
* makes the best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On a successful cancellation,
* the BatchPredictionJob is not deleted;instead its
- * [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] is set to `CANCELLED`. Any files already
- * outputted by the job are not deleted.
+ * [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
+ * is set to `CANCELLED`. Any files already outputted by the job are not
+ * deleted.
*
* Perform an online prediction with an arbitrary HTTP payload.
* The response includes the following HTTP headers:
- * * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served this
+ * * `X-Vertex-AI-Endpoint-Id`: ID of the
+ * [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served this
+ * prediction.
+ * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
+ * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that served this
* prediction.
- * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
- * that served this prediction.
*
*/
public com.google.api.HttpBody rawPredict(
@@ -427,10 +437,12 @@ public com.google.api.HttpBody rawPredict(
*
*
* Perform an online explanation.
- * If [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] is specified,
- * the corresponding DeployModel must have
+ * If
+ * [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
+ * is specified, the corresponding DeployModel must have
* [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec]
- * populated. If [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
+ * populated. If
+ * [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id]
* is not specified, all DeployedModels must have
* [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec]
* populated. Only deployed AutoML tabular Models have
@@ -483,10 +495,12 @@ protected PredictionServiceFutureStub build(
*
*/
public void cancelCustomJob(
@@ -1728,13 +1731,17 @@ public void deleteHyperparameterTuningJob(
* Cancels a HyperparameterTuningJob.
* Starts asynchronous cancellation on the HyperparameterTuningJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the HyperparameterTuningJob is not deleted; instead it becomes a job with
- * a [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code]
- * of 1, corresponding to `Code.CANCELLED`, and
- * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to `CANCELLED`.
+ * a
+ * [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error]
+ * value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ * corresponding to `Code.CANCELLED`, and
+ * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state]
+ * is set to `CANCELLED`.
*
* Perform an online prediction with an arbitrary HTTP payload.
* The response includes the following HTTP headers:
- * * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served this
+ * * `X-Vertex-AI-Endpoint-Id`: ID of the
+ * [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served this
+ * prediction.
+ * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
+ * [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that served this
* prediction.
- * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
- * that served this prediction.
*
*/
public com.google.common.util.concurrent.ListenableFuture
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ */
+ public void readTensorboardUsage(
+ com.google.cloud.aiplatform.v1.ReadTensorboardUsageRequest request,
+ io.grpc.stub.StreamObserver
* Reads multiple TensorboardTimeSeries' data. The data point number limit is
* 1000 for scalars, 100 for tensors and blob references. If the number of
- * data points stored is less than the limit, all data will be returned.
- * Otherwise, that limit number of data points will be randomly selected from
+ * data points stored is less than the limit, all data is returned.
+ * Otherwise, that limit number of data points is randomly selected from
* this time series and returned.
*
*/
@@ -1860,8 +1925,8 @@ public void batchReadTensorboardTimeSeriesData(
*
*
* Reads a TensorboardTimeSeries' data. By default, if the number of data
- * points stored is less than 1000, all data will be returned. Otherwise, 1000
- * data points will be randomly selected from this time series and returned.
+ * points stored is less than 1000, all data is returned. Otherwise, 1000
+ * data points are randomly selected from this time series and returned.
* This value can be changed by changing max_data_points, which can't be
* greater than 10k.
*
@@ -1898,8 +1963,7 @@ public void readTensorboardBlobData(
*
*
* Write time series data points of multiple TensorboardTimeSeries in multiple
- * TensorboardRun's. If any data fail to be ingested, an error will be
- * returned.
+ * TensorboardRun's. If any data fail to be ingested, an error is returned.
*
*/
public void writeTensorboardExperimentData(
@@ -1916,8 +1980,7 @@ public void writeTensorboardExperimentData(
*
*
* Write time series data points into multiple TensorboardTimeSeries under
- * a TensorboardRun. If any data fail to be ingested, an error will be
- * returned.
+ * a TensorboardRun. If any data fail to be ingested, an error is returned.
*
*/
public void writeTensorboardRunData(
@@ -1960,6 +2023,13 @@ public final io.grpc.ServerServiceDefinition bindService() {
new MethodHandlers<
com.google.cloud.aiplatform.v1.GetTensorboardRequest,
com.google.cloud.aiplatform.v1.Tensorboard>(this, METHODID_GET_TENSORBOARD)))
+ .addMethod(
+ getReadTensorboardUsageMethod(),
+ io.grpc.stub.ServerCalls.asyncUnaryCall(
+ new MethodHandlers<
+ com.google.cloud.aiplatform.v1.ReadTensorboardUsageRequest,
+ com.google.cloud.aiplatform.v1.ReadTensorboardUsageResponse>(
+ this, METHODID_READ_TENSORBOARD_USAGE)))
.addMethod(
getUpdateTensorboardMethod(),
io.grpc.stub.ServerCalls.asyncUnaryCall(
@@ -2194,6 +2264,23 @@ public void getTensorboard(
responseObserver);
}
+ /**
+ *
+ *
+ *
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ */
+ public void readTensorboardUsage(
+ com.google.cloud.aiplatform.v1.ReadTensorboardUsageRequest request,
+ io.grpc.stub.StreamObserver
* Reads multiple TensorboardTimeSeries' data. The data point number limit is
* 1000 for scalars, 100 for tensors and blob references. If the number of
- * data points stored is less than the limit, all data will be returned.
- * Otherwise, that limit number of data points will be randomly selected from
+ * data points stored is less than the limit, all data is returned.
+ * Otherwise, that limit number of data points is randomly selected from
* this time series and returned.
*
*/
@@ -2560,8 +2647,8 @@ public void batchReadTensorboardTimeSeriesData(
*
*
* Reads a TensorboardTimeSeries' data. By default, if the number of data
- * points stored is less than 1000, all data will be returned. Otherwise, 1000
- * data points will be randomly selected from this time series and returned.
+ * points stored is less than 1000, all data is returned. Otherwise, 1000
+ * data points are randomly selected from this time series and returned.
* This value can be changed by changing max_data_points, which can't be
* greater than 10k.
*
@@ -2602,8 +2689,7 @@ public void readTensorboardBlobData(
*
*
* Write time series data points of multiple TensorboardTimeSeries in multiple
- * TensorboardRun's. If any data fail to be ingested, an error will be
- * returned.
+ * TensorboardRun's. If any data fail to be ingested, an error is returned.
*
*/
public void writeTensorboardExperimentData(
@@ -2622,8 +2708,7 @@ public void writeTensorboardExperimentData(
*
*
* Write time series data points into multiple TensorboardTimeSeries under
- * a TensorboardRun. If any data fail to be ingested, an error will be
- * returned.
+ * a TensorboardRun. If any data fail to be ingested, an error is returned.
*
*/
public void writeTensorboardRunData(
@@ -2702,6 +2787,19 @@ public com.google.cloud.aiplatform.v1.Tensorboard getTensorboard(
getChannel(), getGetTensorboardMethod(), getCallOptions(), request);
}
+ /**
+ *
+ *
+ *
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ */
+ public com.google.cloud.aiplatform.v1.ReadTensorboardUsageResponse readTensorboardUsage(
+ com.google.cloud.aiplatform.v1.ReadTensorboardUsageRequest request) {
+ return io.grpc.stub.ClientCalls.blockingUnaryCall(
+ getChannel(), getReadTensorboardUsageMethod(), getCallOptions(), request);
+ }
+
/**
*
*
@@ -2972,8 +3070,8 @@ public com.google.longrunning.Operation deleteTensorboardTimeSeries(
*
* Reads multiple TensorboardTimeSeries' data. The data point number limit is
* 1000 for scalars, 100 for tensors and blob references. If the number of
- * data points stored is less than the limit, all data will be returned.
- * Otherwise, that limit number of data points will be randomly selected from
+ * data points stored is less than the limit, all data is returned.
+ * Otherwise, that limit number of data points is randomly selected from
* this time series and returned.
*
*/
@@ -2989,8 +3087,8 @@ public com.google.longrunning.Operation deleteTensorboardTimeSeries(
*
*
* Reads a TensorboardTimeSeries' data. By default, if the number of data
- * points stored is less than 1000, all data will be returned. Otherwise, 1000
- * data points will be randomly selected from this time series and returned.
+ * points stored is less than 1000, all data is returned. Otherwise, 1000
+ * data points are randomly selected from this time series and returned.
* This value can be changed by changing max_data_points, which can't be
* greater than 10k.
*
@@ -3024,8 +3122,7 @@ public com.google.longrunning.Operation deleteTensorboardTimeSeries(
*
*
* Write time series data points of multiple TensorboardTimeSeries in multiple
- * TensorboardRun's. If any data fail to be ingested, an error will be
- * returned.
+ * TensorboardRun's. If any data fail to be ingested, an error is returned.
*
*/
public com.google.cloud.aiplatform.v1.WriteTensorboardExperimentDataResponse
@@ -3040,8 +3137,7 @@ public com.google.longrunning.Operation deleteTensorboardTimeSeries(
*
*
* Write time series data points into multiple TensorboardTimeSeries under
- * a TensorboardRun. If any data fail to be ingested, an error will be
- * returned.
+ * a TensorboardRun. If any data fail to be ingested, an error is returned.
*
*/
public com.google.cloud.aiplatform.v1.WriteTensorboardRunDataResponse writeTensorboardRunData(
@@ -3112,6 +3208,20 @@ protected TensorboardServiceFutureStub build(
getChannel().newCall(getGetTensorboardMethod(), getCallOptions()), request);
}
+ /**
+ *
+ *
+ *
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture<
+ com.google.cloud.aiplatform.v1.ReadTensorboardUsageResponse>
+ readTensorboardUsage(com.google.cloud.aiplatform.v1.ReadTensorboardUsageRequest request) {
+ return io.grpc.stub.ClientCalls.futureUnaryCall(
+ getChannel().newCall(getReadTensorboardUsageMethod(), getCallOptions()), request);
+ }
+
/**
*
*
@@ -3406,8 +3516,8 @@ protected TensorboardServiceFutureStub build(
*
* Reads multiple TensorboardTimeSeries' data. The data point number limit is
* 1000 for scalars, 100 for tensors and blob references. If the number of
- * data points stored is less than the limit, all data will be returned.
- * Otherwise, that limit number of data points will be randomly selected from
+ * data points stored is less than the limit, all data is returned.
+ * Otherwise, that limit number of data points is randomly selected from
* this time series and returned.
*
*/
@@ -3425,8 +3535,8 @@ protected TensorboardServiceFutureStub build(
*
*
* Reads a TensorboardTimeSeries' data. By default, if the number of data
- * points stored is less than 1000, all data will be returned. Otherwise, 1000
- * data points will be randomly selected from this time series and returned.
+ * points stored is less than 1000, all data is returned. Otherwise, 1000
+ * data points are randomly selected from this time series and returned.
* This value can be changed by changing max_data_points, which can't be
* greater than 10k.
*
@@ -3445,8 +3555,7 @@ protected TensorboardServiceFutureStub build(
*
*
* Write time series data points of multiple TensorboardTimeSeries in multiple
- * TensorboardRun's. If any data fail to be ingested, an error will be
- * returned.
+ * TensorboardRun's. If any data fail to be ingested, an error is returned.
*
*/
public com.google.common.util.concurrent.ListenableFuture<
@@ -3463,8 +3572,7 @@ protected TensorboardServiceFutureStub build(
*
*
* Write time series data points into multiple TensorboardTimeSeries under
- * a TensorboardRun. If any data fail to be ingested, an error will be
- * returned.
+ * a TensorboardRun. If any data fail to be ingested, an error is returned.
*
*/
public com.google.common.util.concurrent.ListenableFuture<
@@ -3495,32 +3603,33 @@ protected TensorboardServiceFutureStub build(
private static final int METHODID_CREATE_TENSORBOARD = 0;
private static final int METHODID_GET_TENSORBOARD = 1;
- private static final int METHODID_UPDATE_TENSORBOARD = 2;
- private static final int METHODID_LIST_TENSORBOARDS = 3;
- private static final int METHODID_DELETE_TENSORBOARD = 4;
- private static final int METHODID_CREATE_TENSORBOARD_EXPERIMENT = 5;
- private static final int METHODID_GET_TENSORBOARD_EXPERIMENT = 6;
- private static final int METHODID_UPDATE_TENSORBOARD_EXPERIMENT = 7;
- private static final int METHODID_LIST_TENSORBOARD_EXPERIMENTS = 8;
- private static final int METHODID_DELETE_TENSORBOARD_EXPERIMENT = 9;
- private static final int METHODID_CREATE_TENSORBOARD_RUN = 10;
- private static final int METHODID_BATCH_CREATE_TENSORBOARD_RUNS = 11;
- private static final int METHODID_GET_TENSORBOARD_RUN = 12;
- private static final int METHODID_UPDATE_TENSORBOARD_RUN = 13;
- private static final int METHODID_LIST_TENSORBOARD_RUNS = 14;
- private static final int METHODID_DELETE_TENSORBOARD_RUN = 15;
- private static final int METHODID_BATCH_CREATE_TENSORBOARD_TIME_SERIES = 16;
- private static final int METHODID_CREATE_TENSORBOARD_TIME_SERIES = 17;
- private static final int METHODID_GET_TENSORBOARD_TIME_SERIES = 18;
- private static final int METHODID_UPDATE_TENSORBOARD_TIME_SERIES = 19;
- private static final int METHODID_LIST_TENSORBOARD_TIME_SERIES = 20;
- private static final int METHODID_DELETE_TENSORBOARD_TIME_SERIES = 21;
- private static final int METHODID_BATCH_READ_TENSORBOARD_TIME_SERIES_DATA = 22;
- private static final int METHODID_READ_TENSORBOARD_TIME_SERIES_DATA = 23;
- private static final int METHODID_READ_TENSORBOARD_BLOB_DATA = 24;
- private static final int METHODID_WRITE_TENSORBOARD_EXPERIMENT_DATA = 25;
- private static final int METHODID_WRITE_TENSORBOARD_RUN_DATA = 26;
- private static final int METHODID_EXPORT_TENSORBOARD_TIME_SERIES_DATA = 27;
+ private static final int METHODID_READ_TENSORBOARD_USAGE = 2;
+ private static final int METHODID_UPDATE_TENSORBOARD = 3;
+ private static final int METHODID_LIST_TENSORBOARDS = 4;
+ private static final int METHODID_DELETE_TENSORBOARD = 5;
+ private static final int METHODID_CREATE_TENSORBOARD_EXPERIMENT = 6;
+ private static final int METHODID_GET_TENSORBOARD_EXPERIMENT = 7;
+ private static final int METHODID_UPDATE_TENSORBOARD_EXPERIMENT = 8;
+ private static final int METHODID_LIST_TENSORBOARD_EXPERIMENTS = 9;
+ private static final int METHODID_DELETE_TENSORBOARD_EXPERIMENT = 10;
+ private static final int METHODID_CREATE_TENSORBOARD_RUN = 11;
+ private static final int METHODID_BATCH_CREATE_TENSORBOARD_RUNS = 12;
+ private static final int METHODID_GET_TENSORBOARD_RUN = 13;
+ private static final int METHODID_UPDATE_TENSORBOARD_RUN = 14;
+ private static final int METHODID_LIST_TENSORBOARD_RUNS = 15;
+ private static final int METHODID_DELETE_TENSORBOARD_RUN = 16;
+ private static final int METHODID_BATCH_CREATE_TENSORBOARD_TIME_SERIES = 17;
+ private static final int METHODID_CREATE_TENSORBOARD_TIME_SERIES = 18;
+ private static final int METHODID_GET_TENSORBOARD_TIME_SERIES = 19;
+ private static final int METHODID_UPDATE_TENSORBOARD_TIME_SERIES = 20;
+ private static final int METHODID_LIST_TENSORBOARD_TIME_SERIES = 21;
+ private static final int METHODID_DELETE_TENSORBOARD_TIME_SERIES = 22;
+ private static final int METHODID_BATCH_READ_TENSORBOARD_TIME_SERIES_DATA = 23;
+ private static final int METHODID_READ_TENSORBOARD_TIME_SERIES_DATA = 24;
+ private static final int METHODID_READ_TENSORBOARD_BLOB_DATA = 25;
+ private static final int METHODID_WRITE_TENSORBOARD_EXPERIMENT_DATA = 26;
+ private static final int METHODID_WRITE_TENSORBOARD_RUN_DATA = 27;
+ private static final int METHODID_EXPORT_TENSORBOARD_TIME_SERIES_DATA = 28;
private static final class MethodHandlers
* Deletes an Index.
* An Index can only be deleted when all its
- * [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] had been undeployed.
+ * [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
+ * had been undeployed.
*
*/
public void deleteIndex(
@@ -640,7 +641,8 @@ public void updateIndex(
*
* Deletes an Index.
* An Index can only be deleted when all its
- * [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] had been undeployed.
+ * [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
+ * had been undeployed.
*
*/
public void deleteIndex(
@@ -764,7 +766,8 @@ public com.google.longrunning.Operation updateIndex(
*
* Deletes an Index.
* An Index can only be deleted when all its
- * [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] had been undeployed.
+ * [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
+ * had been undeployed.
*
*/
public com.google.longrunning.Operation deleteIndex(
@@ -879,7 +882,8 @@ protected IndexServiceFutureStub build(
*
* Deletes an Index.
* An Index can only be deleted when all its
- * [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] had been undeployed.
+ * [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
+ * had been undeployed.
*
*/
public com.google.common.util.concurrent.ListenableFuture
* Pauses a ModelDeploymentMonitoringJob. If the job is running, the server
* makes a best effort to cancel the job. Will mark
- * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] to 'PAUSED'.
+ * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
+ * to 'PAUSED'.
*
*/
public void pauseModelDeploymentMonitoringJob(
@@ -2247,13 +2257,16 @@ public void deleteCustomJob(
* Cancels a CustomJob.
* Starts asynchronous cancellation on the CustomJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the CustomJob is not deleted; instead it becomes a job with
- * a [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- * corresponding to `Code.CANCELLED`, and [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to
- * `CANCELLED`.
+ * a [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] value
+ * with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding
+ * to `Code.CANCELLED`, and
+ * [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set
+ * to `CANCELLED`.
*
*/
public void cancelCustomJob(
@@ -2424,13 +2437,17 @@ public void deleteHyperparameterTuningJob(
* Cancels a HyperparameterTuningJob.
* Starts asynchronous cancellation on the HyperparameterTuningJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the HyperparameterTuningJob is not deleted; instead it becomes a job with
- * a [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code]
- * of 1, corresponding to `Code.CANCELLED`, and
- * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to `CANCELLED`.
+ * a
+ * [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error]
+ * value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ * corresponding to `Code.CANCELLED`, and
+ * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state]
+ * is set to `CANCELLED`.
*
*/
public void cancelHyperparameterTuningJob(
@@ -2519,12 +2536,14 @@ public void deleteBatchPredictionJob(
* Cancels a BatchPredictionJob.
* Starts asynchronous cancellation on the BatchPredictionJob. The server
* makes the best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On a successful cancellation,
* the BatchPredictionJob is not deleted;instead its
- * [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] is set to `CANCELLED`. Any files already
- * outputted by the job are not deleted.
+ * [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
+ * is set to `CANCELLED`. Any files already outputted by the job are not
+ * deleted.
*
*/
public void cancelBatchPredictionJob(
@@ -2650,7 +2669,8 @@ public void deleteModelDeploymentMonitoringJob(
*
* Pauses a ModelDeploymentMonitoringJob. If the job is running, the server
* makes a best effort to cancel the job. Will mark
- * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] to 'PAUSED'.
+ * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
+ * to 'PAUSED'.
*
*/
public void pauseModelDeploymentMonitoringJob(
@@ -2760,13 +2780,16 @@ public com.google.longrunning.Operation deleteCustomJob(
* Cancels a CustomJob.
* Starts asynchronous cancellation on the CustomJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the CustomJob is not deleted; instead it becomes a job with
- * a [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- * corresponding to `Code.CANCELLED`, and [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to
- * `CANCELLED`.
+ * a [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] value
+ * with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding
+ * to `Code.CANCELLED`, and
+ * [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set
+ * to `CANCELLED`.
*
*/
public com.google.protobuf.Empty cancelCustomJob(
@@ -2901,13 +2924,17 @@ public com.google.longrunning.Operation deleteHyperparameterTuningJob(
* Cancels a HyperparameterTuningJob.
* Starts asynchronous cancellation on the HyperparameterTuningJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the HyperparameterTuningJob is not deleted; instead it becomes a job with
- * a [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code]
- * of 1, corresponding to `Code.CANCELLED`, and
- * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to `CANCELLED`.
+ * a
+ * [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error]
+ * value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ * corresponding to `Code.CANCELLED`, and
+ * [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state]
+ * is set to `CANCELLED`.
*
*/
public com.google.protobuf.Empty cancelHyperparameterTuningJob(
@@ -2978,12 +3005,14 @@ public com.google.longrunning.Operation deleteBatchPredictionJob(
* Cancels a BatchPredictionJob.
* Starts asynchronous cancellation on the BatchPredictionJob. The server
* makes the best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On a successful cancellation,
* the BatchPredictionJob is not deleted;instead its
- * [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] is set to `CANCELLED`. Any files already
- * outputted by the job are not deleted.
+ * [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
+ * is set to `CANCELLED`. Any files already outputted by the job are not
+ * deleted.
*
*/
public com.google.protobuf.Empty cancelBatchPredictionJob(
@@ -3085,7 +3114,8 @@ public com.google.longrunning.Operation deleteModelDeploymentMonitoringJob(
*
* Pauses a ModelDeploymentMonitoringJob. If the job is running, the server
* makes a best effort to cancel the job. Will mark
- * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] to 'PAUSED'.
+ * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
+ * to 'PAUSED'.
*
*/
public com.google.protobuf.Empty pauseModelDeploymentMonitoringJob(
@@ -3191,13 +3221,16 @@ protected JobServiceFutureStub build(io.grpc.Channel channel, io.grpc.CallOption
* Cancels a CustomJob.
* Starts asynchronous cancellation on the CustomJob. The server
* makes a best effort to cancel the job, but success is not
- * guaranteed. Clients can use [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] or
- * other methods to check whether the cancellation succeeded or whether the
+ * guaranteed. Clients can use
+ * [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]
+ * or other methods to check whether the cancellation succeeded or whether the
* job completed despite cancellation. On successful cancellation,
* the CustomJob is not deleted; instead it becomes a job with
- * a [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- * corresponding to `Code.CANCELLED`, and [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to
- * `CANCELLED`.
+ * a [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] value
+ * with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding
+ * to `Code.CANCELLED`, and
+ * [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set
+ * to `CANCELLED`.
*
*/
public com.google.common.util.concurrent.ListenableFuture
* Pauses a ModelDeploymentMonitoringJob. If the job is running, the server
* makes a best effort to cancel the job. Will mark
- * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] to 'PAUSED'.
+ * [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
+ * to 'PAUSED'.
*
*/
public com.google.common.util.concurrent.ListenableFuture
* Deletes a Model.
- * A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
- * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the model in its
- * [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] field.
+ * A model cannot be deleted if any
+ * [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
+ * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the
+ * model in its
+ * [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
+ * field.
*
*/
public void deleteModel(
@@ -951,7 +954,8 @@ public void deleteModel(
* Deletes a Model version.
* Model version can only be deleted if there are no [DeployedModels][]
* created from it. Deleting the only version in the Model is not allowed. Use
- * [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] for deleting the Model instead.
+ * [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] for
+ * deleting the Model instead.
*
*/
public void deleteModelVersion(
@@ -981,7 +985,8 @@ public void mergeVersionAliases(
*
* Exports a trained, exportable Model to a location specified by the
* user. A Model is considered to be exportable if it has at least one
- * [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
+ * [supported export
+ * format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
*
*/
public void exportModel(
@@ -1313,9 +1318,12 @@ public void updateExplanationDataset(
*
*
* Deletes a Model.
- * A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
- * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the model in its
- * [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] field.
+ * A model cannot be deleted if any
+ * [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
+ * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the
+ * model in its
+ * [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
+ * field.
*
*/
public void deleteModel(
@@ -1334,7 +1342,8 @@ public void deleteModel(
* Deletes a Model version.
* Model version can only be deleted if there are no [DeployedModels][]
* created from it. Deleting the only version in the Model is not allowed. Use
- * [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] for deleting the Model instead.
+ * [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] for
+ * deleting the Model instead.
*
*/
public void deleteModelVersion(
@@ -1368,7 +1377,8 @@ public void mergeVersionAliases(
*
* Exports a trained, exportable Model to a location specified by the
* user. A Model is considered to be exportable if it has at least one
- * [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
+ * [supported export
+ * format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
*
*/
public void exportModel(
@@ -1588,9 +1598,12 @@ public com.google.longrunning.Operation updateExplanationDataset(
*
*
* Deletes a Model.
- * A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
- * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the model in its
- * [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] field.
+ * A model cannot be deleted if any
+ * [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
+ * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the
+ * model in its
+ * [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
+ * field.
*
*/
public com.google.longrunning.Operation deleteModel(
@@ -1606,7 +1619,8 @@ public com.google.longrunning.Operation deleteModel(
* Deletes a Model version.
* Model version can only be deleted if there are no [DeployedModels][]
* created from it. Deleting the only version in the Model is not allowed. Use
- * [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] for deleting the Model instead.
+ * [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] for
+ * deleting the Model instead.
*
*/
public com.google.longrunning.Operation deleteModelVersion(
@@ -1634,7 +1648,8 @@ public com.google.cloud.aiplatform.v1beta1.Model mergeVersionAliases(
*
* Exports a trained, exportable Model to a location specified by the
* user. A Model is considered to be exportable if it has at least one
- * [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
+ * [supported export
+ * format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
*
*/
public com.google.longrunning.Operation exportModel(
@@ -1831,9 +1846,12 @@ protected ModelServiceFutureStub build(
*
*
* Deletes a Model.
- * A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
- * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the model in its
- * [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] field.
+ * A model cannot be deleted if any
+ * [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource has a
+ * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] based on the
+ * model in its
+ * [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
+ * field.
*
*/
public com.google.common.util.concurrent.ListenableFuture
* Exports a trained, exportable Model to a location specified by the
* user. A Model is considered to be exportable if it has at least one
- * [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
+ * [supported export
+ * format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
*
*/
public com.google.common.util.concurrent.ListenableFuture
* Perform an online prediction with an arbitrary HTTP payload.
* The response includes the following HTTP headers:
- * * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
+ * * `X-Vertex-AI-Endpoint-Id`: ID of the
+ * [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
* prediction.
- * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
- * that served this prediction.
+ * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
+ * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] that served
+ * this prediction.
*
*/
public void rawPredict(
@@ -257,10 +259,12 @@ public void rawPredict(
*
*
* Perform an online explanation.
- * If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified,
- * the corresponding DeployModel must have
+ * If
+ * [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+ * is specified, the corresponding DeployModel must have
* [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
- * populated. If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+ * populated. If
+ * [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
* is not specified, all DeployedModels must have
* [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
* populated. Only deployed AutoML tabular Models have
@@ -339,10 +343,12 @@ public void predict(
*
* Perform an online prediction with an arbitrary HTTP payload.
* The response includes the following HTTP headers:
- * * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
+ * * `X-Vertex-AI-Endpoint-Id`: ID of the
+ * [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
* prediction.
- * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
- * that served this prediction.
+ * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
+ * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] that served
+ * this prediction.
*
*/
public void rawPredict(
@@ -357,10 +363,12 @@ public void rawPredict(
*
*
* Perform an online explanation.
- * If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified,
- * the corresponding DeployModel must have
+ * If
+ * [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+ * is specified, the corresponding DeployModel must have
* [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
- * populated. If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+ * populated. If
+ * [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
* is not specified, all DeployedModels must have
* [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
* populated. Only deployed AutoML tabular Models have
@@ -415,10 +423,12 @@ public com.google.cloud.aiplatform.v1beta1.PredictResponse predict(
*
* Perform an online prediction with an arbitrary HTTP payload.
* The response includes the following HTTP headers:
- * * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
+ * * `X-Vertex-AI-Endpoint-Id`: ID of the
+ * [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
* prediction.
- * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
- * that served this prediction.
+ * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
+ * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] that served
+ * this prediction.
*
*/
public com.google.api.HttpBody rawPredict(
@@ -432,10 +442,12 @@ public com.google.api.HttpBody rawPredict(
*
*
* Perform an online explanation.
- * If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified,
- * the corresponding DeployModel must have
+ * If
+ * [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+ * is specified, the corresponding DeployedModel must have
* [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
- * populated. If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+ * populated. If
+ * [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
* is not specified, all DeployedModels must have
* [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
* populated. Only deployed AutoML tabular Models have
@@ -488,10 +500,12 @@ protected PredictionServiceFutureStub build(
*
* Perform an online prediction with an arbitrary HTTP payload.
* The response includes the following HTTP headers:
- * * `X-Vertex-AI-Endpoint-Id`: ID of the [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
+ * * `X-Vertex-AI-Endpoint-Id`: ID of the
+ * [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that served this
* prediction.
- * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
- * that served this prediction.
+ * * `X-Vertex-AI-Deployed-Model-Id`: ID of the Endpoint's
+ * [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] that served
+ * this prediction.
*
*/
public com.google.common.util.concurrent.ListenableFuture
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ */
+ public void readTensorboardUsage(
+ com.google.cloud.aiplatform.v1beta1.ReadTensorboardUsageRequest request,
+ io.grpc.stub.StreamObserver<
+ com.google.cloud.aiplatform.v1beta1.ReadTensorboardUsageResponse>
+ responseObserver) {
+ io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
+ getReadTensorboardUsageMethod(), responseObserver);
+ }
+
/**
*
*
@@ -1863,8 +1929,8 @@ public void deleteTensorboardTimeSeries(
*
* Reads multiple TensorboardTimeSeries' data. The data point number limit is
* 1000 for scalars, 100 for tensors and blob references. If the number of
- * data points stored is less than the limit, all data will be returned.
- * Otherwise, that limit number of data points will be randomly selected from
+ * data points stored is less than the limit, all data is returned.
+ * Otherwise, that limit number of data points is randomly selected from
* this time series and returned.
*
*/
@@ -1882,8 +1948,8 @@ public void batchReadTensorboardTimeSeriesData(
*
*
* Reads a TensorboardTimeSeries' data. By default, if the number of data
- * points stored is less than 1000, all data will be returned. Otherwise, 1000
- * data points will be randomly selected from this time series and returned.
+ * points stored is less than 1000, all data is returned. Otherwise, 1000
+ * data points are randomly selected from this time series and returned.
* This value can be changed by changing max_data_points, which can't be
* greater than 10k.
*
@@ -1921,8 +1987,7 @@ public void readTensorboardBlobData(
*
*
* Write time series data points of multiple TensorboardTimeSeries in multiple
- * TensorboardRun's. If any data fail to be ingested, an error will be
- * returned.
+ * TensorboardRuns. If any data fail to be ingested, an error is returned.
*
*/
public void writeTensorboardExperimentData(
@@ -1939,8 +2004,7 @@ public void writeTensorboardExperimentData(
*
*
* Write time series data points into multiple TensorboardTimeSeries under
- * a TensorboardRun. If any data fail to be ingested, an error will be
- * returned.
+ * a TensorboardRun. If any data fail to be ingested, an error is returned.
*
*/
public void writeTensorboardRunData(
@@ -1985,6 +2049,13 @@ public final io.grpc.ServerServiceDefinition bindService() {
com.google.cloud.aiplatform.v1beta1.GetTensorboardRequest,
com.google.cloud.aiplatform.v1beta1.Tensorboard>(
this, METHODID_GET_TENSORBOARD)))
+ .addMethod(
+ getReadTensorboardUsageMethod(),
+ io.grpc.stub.ServerCalls.asyncUnaryCall(
+ new MethodHandlers<
+ com.google.cloud.aiplatform.v1beta1.ReadTensorboardUsageRequest,
+ com.google.cloud.aiplatform.v1beta1.ReadTensorboardUsageResponse>(
+ this, METHODID_READ_TENSORBOARD_USAGE)))
.addMethod(
getUpdateTensorboardMethod(),
io.grpc.stub.ServerCalls.asyncUnaryCall(
@@ -2221,6 +2292,24 @@ public void getTensorboard(
responseObserver);
}
+ /**
+ *
+ *
+ *
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ */
+ public void readTensorboardUsage(
+ com.google.cloud.aiplatform.v1beta1.ReadTensorboardUsageRequest request,
+ io.grpc.stub.StreamObserver<
+ com.google.cloud.aiplatform.v1beta1.ReadTensorboardUsageResponse>
+ responseObserver) {
+ io.grpc.stub.ClientCalls.asyncUnaryCall(
+ getChannel().newCall(getReadTensorboardUsageMethod(), getCallOptions()),
+ request,
+ responseObserver);
+ }
+
/**
*
*
@@ -2566,8 +2655,8 @@ public void deleteTensorboardTimeSeries(
*
* Reads multiple TensorboardTimeSeries' data. The data point number limit is
* 1000 for scalars, 100 for tensors and blob references. If the number of
- * data points stored is less than the limit, all data will be returned.
- * Otherwise, that limit number of data points will be randomly selected from
+ * data points stored is less than the limit, all data is returned.
+ * Otherwise, that limit number of data points is randomly selected from
* this time series and returned.
*
*/
@@ -2587,8 +2676,8 @@ public void batchReadTensorboardTimeSeriesData(
*
*
* Reads a TensorboardTimeSeries' data. By default, if the number of data
- * points stored is less than 1000, all data will be returned. Otherwise, 1000
- * data points will be randomly selected from this time series and returned.
+ * points stored is less than 1000, all data is returned. Otherwise, 1000
+ * data points are randomly selected from this time series and returned.
* This value can be changed by changing max_data_points, which can't be
* greater than 10k.
*
@@ -2630,8 +2719,7 @@ public void readTensorboardBlobData(
*
*
* Write time series data points of multiple TensorboardTimeSeries in multiple
- * TensorboardRun's. If any data fail to be ingested, an error will be
- * returned.
+ * TensorboardRuns. If any data fail to be ingested, an error is returned.
*
*/
public void writeTensorboardExperimentData(
@@ -2650,8 +2738,7 @@ public void writeTensorboardExperimentData(
*
*
* Write time series data points into multiple TensorboardTimeSeries under
- * a TensorboardRun. If any data fail to be ingested, an error will be
- * returned.
+ * a TensorboardRun. If any data fail to be ingested, an error is returned.
*
*/
public void writeTensorboardRunData(
@@ -2731,6 +2818,19 @@ public com.google.cloud.aiplatform.v1beta1.Tensorboard getTensorboard(
getChannel(), getGetTensorboardMethod(), getCallOptions(), request);
}
+ /**
+ *
+ *
+ *
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ */
+ public com.google.cloud.aiplatform.v1beta1.ReadTensorboardUsageResponse readTensorboardUsage(
+ com.google.cloud.aiplatform.v1beta1.ReadTensorboardUsageRequest request) {
+ return io.grpc.stub.ClientCalls.blockingUnaryCall(
+ getChannel(), getReadTensorboardUsageMethod(), getCallOptions(), request);
+ }
+
/**
*
*
@@ -3001,8 +3101,8 @@ public com.google.longrunning.Operation deleteTensorboardTimeSeries(
*
* Reads multiple TensorboardTimeSeries' data. The data point number limit is
* 1000 for scalars, 100 for tensors and blob references. If the number of
- * data points stored is less than the limit, all data will be returned.
- * Otherwise, that limit number of data points will be randomly selected from
+ * data points stored is less than the limit, all data is returned.
+ * Otherwise, that limit number of data points is randomly selected from
* this time series and returned.
*
*/
@@ -3018,8 +3118,8 @@ public com.google.longrunning.Operation deleteTensorboardTimeSeries(
*
*
* Reads a TensorboardTimeSeries' data. By default, if the number of data
- * points stored is less than 1000, all data will be returned. Otherwise, 1000
- * data points will be randomly selected from this time series and returned.
+ * points stored is less than 1000, all data is returned. Otherwise, 1000
+ * data points are randomly selected from this time series and returned.
* This value can be changed by changing max_data_points, which can't be
* greater than 10k.
*
@@ -3053,8 +3153,7 @@ public com.google.longrunning.Operation deleteTensorboardTimeSeries(
*
*
* Write time series data points of multiple TensorboardTimeSeries in multiple
- * TensorboardRun's. If any data fail to be ingested, an error will be
- * returned.
+ * TensorboardRuns. If any data fail to be ingested, an error is returned.
*
*/
public com.google.cloud.aiplatform.v1beta1.WriteTensorboardExperimentDataResponse
@@ -3069,8 +3168,7 @@ public com.google.longrunning.Operation deleteTensorboardTimeSeries(
*
*
* Write time series data points into multiple TensorboardTimeSeries under
- * a TensorboardRun. If any data fail to be ingested, an error will be
- * returned.
+ * a TensorboardRun. If any data fail to be ingested, an error is returned.
*
*/
public com.google.cloud.aiplatform.v1beta1.WriteTensorboardRunDataResponse
@@ -3142,6 +3240,21 @@ protected TensorboardServiceFutureStub build(
getChannel().newCall(getGetTensorboardMethod(), getCallOptions()), request);
}
+ /**
+ *
+ *
+ *
+ * Returns a list of monthly active users for a given TensorBoard instance.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture<
+ com.google.cloud.aiplatform.v1beta1.ReadTensorboardUsageResponse>
+ readTensorboardUsage(
+ com.google.cloud.aiplatform.v1beta1.ReadTensorboardUsageRequest request) {
+ return io.grpc.stub.ClientCalls.futureUnaryCall(
+ getChannel().newCall(getReadTensorboardUsageMethod(), getCallOptions()), request);
+ }
+
/**
*
*
@@ -3440,8 +3553,8 @@ protected TensorboardServiceFutureStub build(
*
* Reads multiple TensorboardTimeSeries' data. The data point number limit is
* 1000 for scalars, 100 for tensors and blob references. If the number of
- * data points stored is less than the limit, all data will be returned.
- * Otherwise, that limit number of data points will be randomly selected from
+ * data points stored is less than the limit, all data is returned.
+ * Otherwise, that limit number of data points is randomly selected from
* this time series and returned.
*
*/
@@ -3459,8 +3572,8 @@ protected TensorboardServiceFutureStub build(
*
*
* Reads a TensorboardTimeSeries' data. By default, if the number of data
- * points stored is less than 1000, all data will be returned. Otherwise, 1000
- * data points will be randomly selected from this time series and returned.
+ * points stored is less than 1000, all data is returned. Otherwise, 1000
+ * data points are randomly selected from this time series and returned.
* This value can be changed by changing max_data_points, which can't be
* greater than 10k.
*
@@ -3479,8 +3592,7 @@ protected TensorboardServiceFutureStub build(
*
*
* Write time series data points of multiple TensorboardTimeSeries in multiple
- * TensorboardRun's. If any data fail to be ingested, an error will be
- * returned.
+ * TensorboardRuns. If any data fail to be ingested, an error is returned.
*
*/
public com.google.common.util.concurrent.ListenableFuture<
@@ -3497,8 +3609,7 @@ protected TensorboardServiceFutureStub build(
*
*
* Write time series data points into multiple TensorboardTimeSeries under
- * a TensorboardRun. If any data fail to be ingested, an error will be
- * returned.
+ * a TensorboardRun. If any data fail to be ingested, an error is returned.
*
*/
public com.google.common.util.concurrent.ListenableFuture<
@@ -3529,32 +3640,33 @@ protected TensorboardServiceFutureStub build(
private static final int METHODID_CREATE_TENSORBOARD = 0;
private static final int METHODID_GET_TENSORBOARD = 1;
- private static final int METHODID_UPDATE_TENSORBOARD = 2;
- private static final int METHODID_LIST_TENSORBOARDS = 3;
- private static final int METHODID_DELETE_TENSORBOARD = 4;
- private static final int METHODID_CREATE_TENSORBOARD_EXPERIMENT = 5;
- private static final int METHODID_GET_TENSORBOARD_EXPERIMENT = 6;
- private static final int METHODID_UPDATE_TENSORBOARD_EXPERIMENT = 7;
- private static final int METHODID_LIST_TENSORBOARD_EXPERIMENTS = 8;
- private static final int METHODID_DELETE_TENSORBOARD_EXPERIMENT = 9;
- private static final int METHODID_CREATE_TENSORBOARD_RUN = 10;
- private static final int METHODID_BATCH_CREATE_TENSORBOARD_RUNS = 11;
- private static final int METHODID_GET_TENSORBOARD_RUN = 12;
- private static final int METHODID_UPDATE_TENSORBOARD_RUN = 13;
- private static final int METHODID_LIST_TENSORBOARD_RUNS = 14;
- private static final int METHODID_DELETE_TENSORBOARD_RUN = 15;
- private static final int METHODID_BATCH_CREATE_TENSORBOARD_TIME_SERIES = 16;
- private static final int METHODID_CREATE_TENSORBOARD_TIME_SERIES = 17;
- private static final int METHODID_GET_TENSORBOARD_TIME_SERIES = 18;
- private static final int METHODID_UPDATE_TENSORBOARD_TIME_SERIES = 19;
- private static final int METHODID_LIST_TENSORBOARD_TIME_SERIES = 20;
- private static final int METHODID_DELETE_TENSORBOARD_TIME_SERIES = 21;
- private static final int METHODID_BATCH_READ_TENSORBOARD_TIME_SERIES_DATA = 22;
- private static final int METHODID_READ_TENSORBOARD_TIME_SERIES_DATA = 23;
- private static final int METHODID_READ_TENSORBOARD_BLOB_DATA = 24;
- private static final int METHODID_WRITE_TENSORBOARD_EXPERIMENT_DATA = 25;
- private static final int METHODID_WRITE_TENSORBOARD_RUN_DATA = 26;
- private static final int METHODID_EXPORT_TENSORBOARD_TIME_SERIES_DATA = 27;
+ private static final int METHODID_READ_TENSORBOARD_USAGE = 2;
+ private static final int METHODID_UPDATE_TENSORBOARD = 3;
+ private static final int METHODID_LIST_TENSORBOARDS = 4;
+ private static final int METHODID_DELETE_TENSORBOARD = 5;
+ private static final int METHODID_CREATE_TENSORBOARD_EXPERIMENT = 6;
+ private static final int METHODID_GET_TENSORBOARD_EXPERIMENT = 7;
+ private static final int METHODID_UPDATE_TENSORBOARD_EXPERIMENT = 8;
+ private static final int METHODID_LIST_TENSORBOARD_EXPERIMENTS = 9;
+ private static final int METHODID_DELETE_TENSORBOARD_EXPERIMENT = 10;
+ private static final int METHODID_CREATE_TENSORBOARD_RUN = 11;
+ private static final int METHODID_BATCH_CREATE_TENSORBOARD_RUNS = 12;
+ private static final int METHODID_GET_TENSORBOARD_RUN = 13;
+ private static final int METHODID_UPDATE_TENSORBOARD_RUN = 14;
+ private static final int METHODID_LIST_TENSORBOARD_RUNS = 15;
+ private static final int METHODID_DELETE_TENSORBOARD_RUN = 16;
+ private static final int METHODID_BATCH_CREATE_TENSORBOARD_TIME_SERIES = 17;
+ private static final int METHODID_CREATE_TENSORBOARD_TIME_SERIES = 18;
+ private static final int METHODID_GET_TENSORBOARD_TIME_SERIES = 19;
+ private static final int METHODID_UPDATE_TENSORBOARD_TIME_SERIES = 20;
+ private static final int METHODID_LIST_TENSORBOARD_TIME_SERIES = 21;
+ private static final int METHODID_DELETE_TENSORBOARD_TIME_SERIES = 22;
+ private static final int METHODID_BATCH_READ_TENSORBOARD_TIME_SERIES_DATA = 23;
+ private static final int METHODID_READ_TENSORBOARD_TIME_SERIES_DATA = 24;
+ private static final int METHODID_READ_TENSORBOARD_BLOB_DATA = 25;
+ private static final int METHODID_WRITE_TENSORBOARD_EXPERIMENT_DATA = 26;
+ private static final int METHODID_WRITE_TENSORBOARD_RUN_DATA = 27;
+ private static final int METHODID_EXPORT_TENSORBOARD_TIME_SERIES_DATA = 28;
private static final class MethodHandlers
- * Request message for [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions].
+ * Request message for
+ * [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddContextArtifactsAndExecutionsRequest}
@@ -77,9 +78,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
*
*
*
- * Required. The resource name of the Context that the Artifacts and Executions
- * belong to.
- * Format:
+ * Required. The resource name of the Context that the Artifacts and
+ * Executions belong to. Format:
* `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
*
*
@@ -105,9 +105,8 @@ public java.lang.String getContext() {
*
*
*
- * Required. The resource name of the Context that the Artifacts and Executions
- * belong to.
- * Format:
+ * Required. The resource name of the Context that the Artifacts and
+ * Executions belong to. Format:
* `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
*
*
@@ -469,7 +468,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Request message for [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions].
+ * Request message for
+ * [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddContextArtifactsAndExecutionsRequest}
@@ -699,9 +699,8 @@ public Builder mergeFrom(
*
*
*
- * Required. The resource name of the Context that the Artifacts and Executions
- * belong to.
- * Format:
+ * Required. The resource name of the Context that the Artifacts and
+ * Executions belong to. Format:
* `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
*
*
@@ -726,9 +725,8 @@ public java.lang.String getContext() {
*
*
*
- * Required. The resource name of the Context that the Artifacts and Executions
- * belong to.
- * Format:
+ * Required. The resource name of the Context that the Artifacts and
+ * Executions belong to. Format:
* `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
*
*
@@ -753,9 +751,8 @@ public com.google.protobuf.ByteString getContextBytes() {
*
*
*
- * Required. The resource name of the Context that the Artifacts and Executions
- * belong to.
- * Format:
+ * Required. The resource name of the Context that the Artifacts and
+ * Executions belong to. Format:
* `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
*
*
@@ -779,9 +776,8 @@ public Builder setContext(java.lang.String value) {
*
*
*
- * Required. The resource name of the Context that the Artifacts and Executions
- * belong to.
- * Format:
+ * Required. The resource name of the Context that the Artifacts and
+ * Executions belong to. Format:
* `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
*
*
@@ -801,9 +797,8 @@ public Builder clearContext() {
*
*
*
- * Required. The resource name of the Context that the Artifacts and Executions
- * belong to.
- * Format:
+ * Required. The resource name of the Context that the Artifacts and
+ * Executions belong to. Format:
* `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
*
*
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextArtifactsAndExecutionsRequestOrBuilder.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextArtifactsAndExecutionsRequestOrBuilder.java
index 661ee03470aa..8d23a60c21cc 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextArtifactsAndExecutionsRequestOrBuilder.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextArtifactsAndExecutionsRequestOrBuilder.java
@@ -27,9 +27,8 @@ public interface AddContextArtifactsAndExecutionsRequestOrBuilder
*
*
*
- * Required. The resource name of the Context that the Artifacts and Executions
- * belong to.
- * Format:
+ * Required. The resource name of the Context that the Artifacts and
+ * Executions belong to. Format:
* `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
*
*
@@ -44,9 +43,8 @@ public interface AddContextArtifactsAndExecutionsRequestOrBuilder
*
*
*
- * Required. The resource name of the Context that the Artifacts and Executions
- * belong to.
- * Format:
+ * Required. The resource name of the Context that the Artifacts and
+ * Executions belong to. Format:
* `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`
*
*
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextArtifactsAndExecutionsResponse.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextArtifactsAndExecutionsResponse.java
index 0306cc6a3bc4..31823127557f 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextArtifactsAndExecutionsResponse.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextArtifactsAndExecutionsResponse.java
@@ -22,7 +22,8 @@
*
*
*
- * Response message for [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions].
+ * Response message for
+ * [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddContextArtifactsAndExecutionsResponse}
@@ -223,7 +224,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Response message for [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions].
+ * Response message for
+ * [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddContextArtifactsAndExecutionsResponse}
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextChildrenRequest.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextChildrenRequest.java
index 9df6451922ed..07e3ad9a6ddc 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextChildrenRequest.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextChildrenRequest.java
@@ -22,7 +22,8 @@
*
*
*
- * Request message for [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren].
+ * Request message for
+ * [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddContextChildrenRequest}
@@ -366,7 +367,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Request message for [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren].
+ * Request message for
+ * [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddContextChildrenRequest}
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextChildrenResponse.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextChildrenResponse.java
index b9f118ea1ffe..c3270bebd7ee 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextChildrenResponse.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddContextChildrenResponse.java
@@ -22,7 +22,8 @@
*
*
*
- * Response message for [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren].
+ * Response message for
+ * [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddContextChildrenResponse}
@@ -220,7 +221,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Response message for [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren].
+ * Response message for
+ * [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddContextChildrenResponse}
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddExecutionEventsRequest.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddExecutionEventsRequest.java
index 0f8f1b67502f..1bd77b2a2fe2 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddExecutionEventsRequest.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddExecutionEventsRequest.java
@@ -22,7 +22,8 @@
*
*
*
- * Request message for [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].
+ * Request message for
+ * [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddExecutionEventsRequest}
@@ -371,7 +372,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Request message for [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].
+ * Request message for
+ * [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddExecutionEventsRequest}
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddExecutionEventsResponse.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddExecutionEventsResponse.java
index 0cdc2bf8f674..e576dcf82748 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddExecutionEventsResponse.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddExecutionEventsResponse.java
@@ -22,7 +22,8 @@
*
*
*
- * Response message for [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].
+ * Response message for
+ * [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddExecutionEventsResponse}
@@ -220,7 +221,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Response message for [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].
+ * Response message for
+ * [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddExecutionEventsResponse}
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddTrialMeasurementRequest.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddTrialMeasurementRequest.java
index b6e2304fd27d..cdc9a3c0cb28 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddTrialMeasurementRequest.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AddTrialMeasurementRequest.java
@@ -22,7 +22,8 @@
*
*
*
- * Request message for [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement].
+ * Request message for
+ * [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddTrialMeasurementRequest}
@@ -356,7 +357,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Request message for [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement].
+ * Request message for
+ * [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.AddTrialMeasurementRequest}
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/Annotation.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/Annotation.java
index 75e9b47632bf..7bd8884bde37 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/Annotation.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/Annotation.java
@@ -136,8 +136,9 @@ public com.google.protobuf.ByteString getNameBytes() {
*
*
*
- * Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1.Annotation.payload]. The
- * schema is defined as an [OpenAPI 3.0.2 Schema
+ * Required. Google Cloud Storage URI points to a YAML file describing
+ * [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is
+ * defined as an [OpenAPI 3.0.2 Schema
* Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
* The schema files that can be used here are found in
* gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@@ -165,8 +166,9 @@ public java.lang.String getPayloadSchemaUri() {
*
*
*
- * Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1.Annotation.payload]. The
- * schema is defined as an [OpenAPI 3.0.2 Schema
+ * Required. Google Cloud Storage URI points to a YAML file describing
+ * [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is
+ * defined as an [OpenAPI 3.0.2 Schema
* Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
* The schema files that can be used here are found in
* gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@@ -344,8 +346,8 @@ public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 8 [(.google.api.field_behavior) = OPTIONAL];
@@ -368,8 +370,8 @@ public java.lang.String getEtag() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 8 [(.google.api.field_behavior) = OPTIONAL];
@@ -473,7 +475,8 @@ public int getLabelsCount() {
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -486,7 +489,8 @@ public int getLabelsCount() {
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
@@ -509,7 +513,8 @@ public java.util.Map
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -522,7 +527,8 @@ public java.util.Map
*
@@ -536,7 +542,8 @@ public java.util.Map
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -549,7 +556,8 @@ public java.util.Map
*
@@ -567,7 +575,8 @@ public java.lang.String getLabelsOrDefault(java.lang.String key, java.lang.Strin
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -580,7 +589,8 @@ public java.lang.String getLabelsOrDefault(java.lang.String key, java.lang.Strin
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
@@ -1275,8 +1285,9 @@ public Builder setNameBytes(com.google.protobuf.ByteString value) {
*
*
*
- * Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1.Annotation.payload]. The
- * schema is defined as an [OpenAPI 3.0.2 Schema
+ * Required. Google Cloud Storage URI points to a YAML file describing
+ * [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is
+ * defined as an [OpenAPI 3.0.2 Schema
* Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
* The schema files that can be used here are found in
* gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@@ -1303,8 +1314,9 @@ public java.lang.String getPayloadSchemaUri() {
*
*
*
- * Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1.Annotation.payload]. The
- * schema is defined as an [OpenAPI 3.0.2 Schema
+ * Required. Google Cloud Storage URI points to a YAML file describing
+ * [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is
+ * defined as an [OpenAPI 3.0.2 Schema
* Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
* The schema files that can be used here are found in
* gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@@ -1331,8 +1343,9 @@ public com.google.protobuf.ByteString getPayloadSchemaUriBytes() {
*
*
*
- * Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1.Annotation.payload]. The
- * schema is defined as an [OpenAPI 3.0.2 Schema
+ * Required. Google Cloud Storage URI points to a YAML file describing
+ * [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is
+ * defined as an [OpenAPI 3.0.2 Schema
* Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
* The schema files that can be used here are found in
* gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@@ -1358,8 +1371,9 @@ public Builder setPayloadSchemaUri(java.lang.String value) {
*
*
*
- * Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1.Annotation.payload]. The
- * schema is defined as an [OpenAPI 3.0.2 Schema
+ * Required. Google Cloud Storage URI points to a YAML file describing
+ * [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is
+ * defined as an [OpenAPI 3.0.2 Schema
* Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
* The schema files that can be used here are found in
* gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@@ -1381,8 +1395,9 @@ public Builder clearPayloadSchemaUri() {
*
*
*
- * Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1.Annotation.payload]. The
- * schema is defined as an [OpenAPI 3.0.2 Schema
+ * Required. Google Cloud Storage URI points to a YAML file describing
+ * [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is
+ * defined as an [OpenAPI 3.0.2 Schema
* Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
* The schema files that can be used here are found in
* gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@@ -2000,8 +2015,8 @@ public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 8 [(.google.api.field_behavior) = OPTIONAL];
@@ -2023,8 +2038,8 @@ public java.lang.String getEtag() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 8 [(.google.api.field_behavior) = OPTIONAL];
@@ -2046,8 +2061,8 @@ public com.google.protobuf.ByteString getEtagBytes() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 8 [(.google.api.field_behavior) = OPTIONAL];
@@ -2068,8 +2083,8 @@ public Builder setEtag(java.lang.String value) {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 8 [(.google.api.field_behavior) = OPTIONAL];
@@ -2086,8 +2101,8 @@ public Builder clearEtag() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 8 [(.google.api.field_behavior) = OPTIONAL];
@@ -2340,7 +2355,8 @@ public int getLabelsCount() {
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -2353,7 +2369,8 @@ public int getLabelsCount() {
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
@@ -2376,7 +2393,8 @@ public java.util.Map
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -2389,7 +2407,8 @@ public java.util.Map
*
@@ -2403,7 +2422,8 @@ public java.util.Map
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -2416,7 +2436,8 @@ public java.util.Map
*
@@ -2435,7 +2456,8 @@ public java.lang.String getLabelsOrDefault(
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -2448,7 +2470,8 @@ public java.lang.String getLabelsOrDefault(
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
@@ -2474,7 +2497,8 @@ public Builder clearLabels() {
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -2487,7 +2511,8 @@ public Builder clearLabels() {
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
@@ -2509,7 +2534,8 @@ public java.util.Map
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -2522,7 +2548,8 @@ public java.util.Map
*
@@ -2543,7 +2570,8 @@ public Builder putLabels(java.lang.String key, java.lang.String value) {
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -2556,7 +2584,8 @@ public Builder putLabels(java.lang.String key, java.lang.String value) {
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationOrBuilder.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationOrBuilder.java
index a3df5ee5d172..e467ce242841 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationOrBuilder.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationOrBuilder.java
@@ -52,8 +52,9 @@ public interface AnnotationOrBuilder
*
*
*
- * Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1.Annotation.payload]. The
- * schema is defined as an [OpenAPI 3.0.2 Schema
+ * Required. Google Cloud Storage URI points to a YAML file describing
+ * [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is
+ * defined as an [OpenAPI 3.0.2 Schema
* Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
* The schema files that can be used here are found in
* gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@@ -70,8 +71,9 @@ public interface AnnotationOrBuilder
*
*
*
- * Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1.Annotation.payload]. The
- * schema is defined as an [OpenAPI 3.0.2 Schema
+ * Required. Google Cloud Storage URI points to a YAML file describing
+ * [payload][google.cloud.aiplatform.v1.Annotation.payload]. The schema is
+ * defined as an [OpenAPI 3.0.2 Schema
* Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
* The schema files that can be used here are found in
* gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
@@ -203,8 +205,8 @@ public interface AnnotationOrBuilder
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 8 [(.google.api.field_behavior) = OPTIONAL];
@@ -216,8 +218,8 @@ public interface AnnotationOrBuilder
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 8 [(.google.api.field_behavior) = OPTIONAL];
@@ -271,7 +273,8 @@ public interface AnnotationOrBuilder
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -284,7 +287,8 @@ public interface AnnotationOrBuilder
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
@@ -295,7 +299,8 @@ public interface AnnotationOrBuilder
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -308,7 +313,8 @@ public interface AnnotationOrBuilder
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
@@ -322,7 +328,8 @@ public interface AnnotationOrBuilder
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -335,7 +342,8 @@ public interface AnnotationOrBuilder
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
@@ -346,7 +354,8 @@ public interface AnnotationOrBuilder
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -359,7 +368,8 @@ public interface AnnotationOrBuilder
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
@@ -375,7 +385,8 @@ java.lang.String getLabelsOrDefault(
*
*
*
- * Optional. The labels with user-defined metadata to organize your Annotations.
+ * Optional. The labels with user-defined metadata to organize your
+ * Annotations.
* Label keys and values can be no longer than 64 characters
* (Unicode codepoints), can only contain lowercase letters, numeric
* characters, underscores and dashes. International characters are allowed.
@@ -388,7 +399,8 @@ java.lang.String getLabelsOrDefault(
* optional, name of the UI's annotation set this Annotation belongs to.
* If not set, the Annotation is not visible in the UI.
* * "aiplatform.googleapis.com/payload_schema":
- * output only, its value is the [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ * output only, its value is the
+ * [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
* title.
*
*
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationSpec.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationSpec.java
index 97e332fa681a..6ef063a4d80e 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationSpec.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationSpec.java
@@ -275,8 +275,8 @@ public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -299,8 +299,8 @@ public java.lang.String getEtag() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -1390,8 +1390,8 @@ public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -1413,8 +1413,8 @@ public java.lang.String getEtag() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -1436,8 +1436,8 @@ public com.google.protobuf.ByteString getEtagBytes() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -1458,8 +1458,8 @@ public Builder setEtag(java.lang.String value) {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -1476,8 +1476,8 @@ public Builder clearEtag() {
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 5 [(.google.api.field_behavior) = OPTIONAL];
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationSpecOrBuilder.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationSpecOrBuilder.java
index 492e7d0bad43..e7f93b36f382 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationSpecOrBuilder.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AnnotationSpecOrBuilder.java
@@ -157,8 +157,8 @@ public interface AnnotationSpecOrBuilder
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 5 [(.google.api.field_behavior) = OPTIONAL];
@@ -170,8 +170,8 @@ public interface AnnotationSpecOrBuilder
*
*
*
- * Optional. Used to perform consistent read-modify-write updates. If not set, a blind
- * "overwrite" update happens.
+ * Optional. Used to perform consistent read-modify-write updates. If not set,
+ * a blind "overwrite" update happens.
*
*
* string etag = 5 [(.google.api.field_behavior) = OPTIONAL];
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/Attribution.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/Attribution.java
index 3f6cb6ad54e5..babc2e4729f0 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/Attribution.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/Attribution.java
@@ -75,12 +75,14 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
*
*
*
- * Output only. Model predicted output if the input instance is constructed from the
- * baselines of all the features defined in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Model predicted output if the input instance is constructed
+ * from the baselines of all the features defined in
+ * [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The field name of the output is determined by the key in
* [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
* If the Model's predicted output has multiple dimensions (rank > 1), this is
- * the value in the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
+ * the value in the output located by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
* If there are multiple baselines, their output values are averaged.
*
*
@@ -101,9 +103,11 @@ public double getBaselineOutputValue() {
*
* Output only. Model predicted output on the corresponding [explanation
* instance][ExplainRequest.instances]. The field name of the output is
- * determined by the key in [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
+ * determined by the key in
+ * [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
* If the Model predicted output has multiple dimensions, this is the value in
- * the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
+ * the output located by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
*
*
* double instance_output_value = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
@@ -121,12 +125,15 @@ public double getInstanceOutputValue() {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -137,10 +144,13 @@ public double getInstanceOutputValue() {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -157,12 +167,15 @@ public boolean hasFeatureAttributions() {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -173,10 +186,13 @@ public boolean hasFeatureAttributions() {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -195,12 +211,15 @@ public com.google.protobuf.Value getFeatureAttributions() {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -211,10 +230,13 @@ public com.google.protobuf.Value getFeatureAttributions() {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -296,8 +318,9 @@ public int getOutputIndex(int index) {
*
*
*
- * Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For example,
- * the predicted class name by a multi-classification Model.
+ * Output only. The display name of the output identified by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For
+ * example, the predicted class name by a multi-classification Model.
   * This field is populated if and only if the Model predicts display names as
   * a separate field along with the explained output. The predicted display
   * name must have the same shape as the explained output, and can be located
@@ -324,8 +347,9 @@ public java.lang.String getOutputDisplayName() {
*
*
*
- * Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For example,
- * the predicted class name by a multi-classification Model.
+ * Output only. The display name of the output identified by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For
+ * example, the predicted class name by a multi-classification Model.
   * This field is populated if and only if the Model predicts display names as
   * a separate field along with the explained output. The predicted display
   * name must have the same shape as the explained output, and can be located
@@ -355,19 +379,25 @@ public com.google.protobuf.ByteString getOutputDisplayNameBytes() {
*
*
*
- * Output only. Error of [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] caused by approximation used in the
- * explanation method. Lower value means more precise attributions.
+ * Output only. Error of
+ * [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]
+ * caused by approximation used in the explanation method. Lower value means
+ * more precise attributions.
* * For Sampled Shapley
* [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution],
- * increasing [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count] might reduce
- * the error.
+ * increasing
+ * [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count]
+ * might reduce the error.
* * For Integrated Gradients
* [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution],
- * increasing [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count] might
- * reduce the error.
- * * For [XRAI attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
* increasing
- * [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might reduce the error.
+ * [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count]
+ * might reduce the error.
+ * * For [XRAI
+ * attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
+ * increasing
+ * [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might
+ * reduce the error.
* See [this introduction](/vertex-ai/docs/explainable-ai/overview)
* for more information.
*
@@ -962,12 +992,14 @@ public Builder mergeFrom(
*
*
*
- * Output only. Model predicted output if the input instance is constructed from the
- * baselines of all the features defined in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Model predicted output if the input instance is constructed
+ * from the baselines of all the features defined in
+ * [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The field name of the output is determined by the key in
* [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
* If the Model's predicted output has multiple dimensions (rank > 1), this is
- * the value in the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
+ * the value in the output located by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
* If there are multiple baselines, their output values are averaged.
*
*
@@ -983,12 +1015,14 @@ public double getBaselineOutputValue() {
*
*
*
- * Output only. Model predicted output if the input instance is constructed from the
- * baselines of all the features defined in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Model predicted output if the input instance is constructed
+ * from the baselines of all the features defined in
+ * [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The field name of the output is determined by the key in
* [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
* If the Model's predicted output has multiple dimensions (rank > 1), this is
- * the value in the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
+ * the value in the output located by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
* If there are multiple baselines, their output values are averaged.
*
*
@@ -1007,12 +1041,14 @@ public Builder setBaselineOutputValue(double value) {
*
*
*
- * Output only. Model predicted output if the input instance is constructed from the
- * baselines of all the features defined in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Model predicted output if the input instance is constructed
+ * from the baselines of all the features defined in
+ * [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The field name of the output is determined by the key in
* [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
* If the Model's predicted output has multiple dimensions (rank > 1), this is
- * the value in the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
+ * the value in the output located by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
* If there are multiple baselines, their output values are averaged.
*
*
@@ -1034,9 +1070,11 @@ public Builder clearBaselineOutputValue() {
*
* Output only. Model predicted output on the corresponding [explanation
* instance][ExplainRequest.instances]. The field name of the output is
- * determined by the key in [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
+ * determined by the key in
+ * [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
* If the Model predicted output has multiple dimensions, this is the value in
- * the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
+ * the output located by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
*
*
* double instance_output_value = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
@@ -1053,9 +1091,11 @@ public double getInstanceOutputValue() {
*
* Output only. Model predicted output on the corresponding [explanation
* instance][ExplainRequest.instances]. The field name of the output is
- * determined by the key in [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
+ * determined by the key in
+ * [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
* If the Model predicted output has multiple dimensions, this is the value in
- * the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
+ * the output located by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
*
*
* double instance_output_value = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
@@ -1075,9 +1115,11 @@ public Builder setInstanceOutputValue(double value) {
*
* Output only. Model predicted output on the corresponding [explanation
* instance][ExplainRequest.instances]. The field name of the output is
- * determined by the key in [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
+ * determined by the key in
+ * [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
* If the Model predicted output has multiple dimensions, this is the value in
- * the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
+ * the output located by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
*
*
* double instance_output_value = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
@@ -1101,12 +1143,15 @@ public Builder clearInstanceOutputValue() {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -1117,10 +1162,13 @@ public Builder clearInstanceOutputValue() {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -1136,12 +1184,15 @@ public boolean hasFeatureAttributions() {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -1152,10 +1203,13 @@ public boolean hasFeatureAttributions() {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -1177,12 +1231,15 @@ public com.google.protobuf.Value getFeatureAttributions() {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -1193,10 +1250,13 @@ public com.google.protobuf.Value getFeatureAttributions() {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -1220,12 +1280,15 @@ public Builder setFeatureAttributions(com.google.protobuf.Value value) {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -1236,10 +1299,13 @@ public Builder setFeatureAttributions(com.google.protobuf.Value value) {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -1260,12 +1326,15 @@ public Builder setFeatureAttributions(com.google.protobuf.Value.Builder builderF
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -1276,10 +1345,13 @@ public Builder setFeatureAttributions(com.google.protobuf.Value.Builder builderF
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -1307,12 +1379,15 @@ public Builder mergeFeatureAttributions(com.google.protobuf.Value value) {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -1323,10 +1398,13 @@ public Builder mergeFeatureAttributions(com.google.protobuf.Value value) {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -1348,12 +1426,15 @@ public Builder clearFeatureAttributions() {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -1364,10 +1445,13 @@ public Builder clearFeatureAttributions() {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -1383,12 +1467,15 @@ public com.google.protobuf.Value.Builder getFeatureAttributionsBuilder() {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -1399,10 +1486,13 @@ public com.google.protobuf.Value.Builder getFeatureAttributionsBuilder() {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -1422,12 +1512,15 @@ public com.google.protobuf.ValueOrBuilder getFeatureAttributionsOrBuilder() {
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -1438,10 +1531,13 @@ public com.google.protobuf.ValueOrBuilder getFeatureAttributionsOrBuilder() {
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -1631,8 +1727,9 @@ public Builder clearOutputIndex() {
*
*
*
- * Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For example,
- * the predicted class name by a multi-classification Model.
+ * Output only. The display name of the output identified by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For
+ * example, the predicted class name by a multi-classification Model.
* This field is only populated iff the Model predicts display names as a
* separate field along with the explained output. The predicted display name
* must has the same shape of the explained output, and can be located using
@@ -1658,8 +1755,9 @@ public java.lang.String getOutputDisplayName() {
*
*
*
- * Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For example,
- * the predicted class name by a multi-classification Model.
+ * Output only. The display name of the output identified by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For
+ * example, the predicted class name by a multi-classification Model.
* This field is only populated iff the Model predicts display names as a
* separate field along with the explained output. The predicted display name
* must has the same shape of the explained output, and can be located using
@@ -1685,8 +1783,9 @@ public com.google.protobuf.ByteString getOutputDisplayNameBytes() {
*
*
*
- * Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For example,
- * the predicted class name by a multi-classification Model.
+ * Output only. The display name of the output identified by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For
+ * example, the predicted class name by a multi-classification Model.
* This field is only populated iff the Model predicts display names as a
* separate field along with the explained output. The predicted display name
* must has the same shape of the explained output, and can be located using
@@ -1711,8 +1810,9 @@ public Builder setOutputDisplayName(java.lang.String value) {
*
*
*
- * Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For example,
- * the predicted class name by a multi-classification Model.
+ * Output only. The display name of the output identified by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For
+ * example, the predicted class name by a multi-classification Model.
* This field is only populated iff the Model predicts display names as a
* separate field along with the explained output. The predicted display name
* must has the same shape of the explained output, and can be located using
@@ -1733,8 +1833,9 @@ public Builder clearOutputDisplayName() {
*
*
*
- * Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For example,
- * the predicted class name by a multi-classification Model.
+ * Output only. The display name of the output identified by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For
+ * example, the predicted class name by a multi-classification Model.
* This field is only populated iff the Model predicts display names as a
* separate field along with the explained output. The predicted display name
* must has the same shape of the explained output, and can be located using
@@ -1762,19 +1863,25 @@ public Builder setOutputDisplayNameBytes(com.google.protobuf.ByteString value) {
*
*
*
- * Output only. Error of [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] caused by approximation used in the
- * explanation method. Lower value means more precise attributions.
+ * Output only. Error of
+ * [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]
+ * caused by approximation used in the explanation method. Lower value means
+ * more precise attributions.
* * For Sampled Shapley
* [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution],
- * increasing [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count] might reduce
- * the error.
+ * increasing
+ * [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count]
+ * might reduce the error.
* * For Integrated Gradients
* [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution],
- * increasing [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count] might
- * reduce the error.
- * * For [XRAI attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
* increasing
- * [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might reduce the error.
+ * [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count]
+ * might reduce the error.
+ * * For [XRAI
+ * attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
+ * increasing
+ * [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might
+ * reduce the error.
* See [this introduction](/vertex-ai/docs/explainable-ai/overview)
* for more information.
*
@@ -1791,19 +1898,25 @@ public double getApproximationError() {
*
*
*
- * Output only. Error of [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] caused by approximation used in the
- * explanation method. Lower value means more precise attributions.
+ * Output only. Error of
+ * [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]
+ * caused by approximation used in the explanation method. Lower value means
+ * more precise attributions.
* * For Sampled Shapley
* [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution],
- * increasing [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count] might reduce
- * the error.
+ * increasing
+ * [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count]
+ * might reduce the error.
* * For Integrated Gradients
* [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution],
- * increasing [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count] might
- * reduce the error.
- * * For [XRAI attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
* increasing
- * [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might reduce the error.
+ * [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count]
+ * might reduce the error.
+ * * For [XRAI
+ * attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
+ * increasing
+ * [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might
+ * reduce the error.
* See [this introduction](/vertex-ai/docs/explainable-ai/overview)
* for more information.
*
@@ -1823,19 +1936,25 @@ public Builder setApproximationError(double value) {
*
*
*
- * Output only. Error of [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] caused by approximation used in the
- * explanation method. Lower value means more precise attributions.
+ * Output only. Error of
+ * [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]
+ * caused by approximation used in the explanation method. Lower value means
+ * more precise attributions.
* * For Sampled Shapley
* [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution],
- * increasing [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count] might reduce
- * the error.
+ * increasing
+ * [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count]
+ * might reduce the error.
* * For Integrated Gradients
* [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution],
- * increasing [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count] might
- * reduce the error.
- * * For [XRAI attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
* increasing
- * [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might reduce the error.
+ * [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count]
+ * might reduce the error.
+ * * For [XRAI
+ * attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
+ * increasing
+ * [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might
+ * reduce the error.
* See [this introduction](/vertex-ai/docs/explainable-ai/overview)
* for more information.
*
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AttributionOrBuilder.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AttributionOrBuilder.java
index 1fa08a842ade..470297a4f6d3 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AttributionOrBuilder.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AttributionOrBuilder.java
@@ -27,12 +27,14 @@ public interface AttributionOrBuilder
*
*
*
- * Output only. Model predicted output if the input instance is constructed from the
- * baselines of all the features defined in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Model predicted output if the input instance is constructed
+ * from the baselines of all the features defined in
+ * [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The field name of the output is determined by the key in
* [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
* If the Model's predicted output has multiple dimensions (rank > 1), this is
- * the value in the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
+ * the value in the output located by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
* If there are multiple baselines, their output values are averaged.
*
*
@@ -48,9 +50,11 @@ public interface AttributionOrBuilder
*
* Output only. Model predicted output on the corresponding [explanation
* instance][ExplainRequest.instances]. The field name of the output is
- * determined by the key in [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
+ * determined by the key in
+ * [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
* If the Model predicted output has multiple dimensions, this is the value in
- * the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
+ * the output located by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
*
*
* double instance_output_value = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
@@ -63,12 +67,15 @@ public interface AttributionOrBuilder
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -79,10 +86,13 @@ public interface AttributionOrBuilder
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -96,12 +106,15 @@ public interface AttributionOrBuilder
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -112,10 +125,13 @@ public interface AttributionOrBuilder
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -129,12 +145,15 @@ public interface AttributionOrBuilder
*
*
*
- * Output only. Attributions of each explained feature. Features are extracted from
- * the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
- * [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
+ * Output only. Attributions of each explained feature. Features are extracted
+ * from the [prediction
+ * instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according
+ * to [explanation metadata for
+ * inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
* The value is a struct, whose keys are the name of the feature. The values
- * are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
- * contributed to the predicted result.
+ * are how much the feature in the
+ * [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] contributed
+ * to the predicted result.
* The format of the value is determined by the feature's input format:
* * If the feature is a scalar value, the attribution value is a
* [floating number][google.protobuf.Value.number_value].
@@ -145,10 +164,13 @@ public interface AttributionOrBuilder
* attribution value struct are the same as the keys in the feature
* struct. The formats of the values in the attribution struct are
* determined by the formats of the values in the feature struct.
- * The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
- * pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
- * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
- * describes the features and their attribution values (if it is populated).
+ * The
+ * [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri]
+ * field, pointed to by the
+ * [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
+ * [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
+ * object, points to the schema file that describes the features and their
+ * attribution values (if it is populated).
*
*
*
@@ -214,8 +236,9 @@ public interface AttributionOrBuilder
*
*
*
- * Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For example,
- * the predicted class name by a multi-classification Model.
+ * Output only. The display name of the output identified by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For
+ * example, the predicted class name by a multi-classification Model.
* This field is only populated iff the Model predicts display names as a
* separate field along with the explained output. The predicted display name
* must has the same shape of the explained output, and can be located using
@@ -231,8 +254,9 @@ public interface AttributionOrBuilder
*
*
*
- * Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For example,
- * the predicted class name by a multi-classification Model.
+ * Output only. The display name of the output identified by
+ * [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For
+ * example, the predicted class name by a multi-classification Model.
* This field is only populated iff the Model predicts display names as a
* separate field along with the explained output. The predicted display name
* must has the same shape of the explained output, and can be located using
@@ -249,19 +273,25 @@ public interface AttributionOrBuilder
*
*
*
- * Output only. Error of [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] caused by approximation used in the
- * explanation method. Lower value means more precise attributions.
+ * Output only. Error of
+ * [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]
+ * caused by approximation used in the explanation method. Lower value means
+ * more precise attributions.
* * For Sampled Shapley
* [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution],
- * increasing [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count] might reduce
- * the error.
+ * increasing
+ * [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count]
+ * might reduce the error.
* * For Integrated Gradients
* [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution],
- * increasing [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count] might
- * reduce the error.
- * * For [XRAI attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
* increasing
- * [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might reduce the error.
+ * [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count]
+ * might reduce the error.
+ * * For [XRAI
+ * attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
+ * increasing
+ * [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might
+ * reduce the error.
* See [this introduction](/vertex-ai/docs/explainable-ai/overview)
* for more information.
*
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AutomaticResources.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AutomaticResources.java
index bb1ef62f32a5..b9bdfd598339 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AutomaticResources.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AutomaticResources.java
@@ -73,11 +73,12 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
*
*
*
- * Immutable. The minimum number of replicas this DeployedModel will be always deployed
- * on. If traffic against it increases, it may dynamically be deployed onto
- * more replicas up to [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count], and as traffic decreases, some
- * of these extra replicas may be freed.
- * If the requested value is too large, the deployment will error.
+ * Immutable. The minimum number of replicas this DeployedModel will always be
+ * deployed on. If traffic against it increases, it may dynamically be
+ * deployed onto more replicas up to
+ * [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count],
+ * and as traffic decreases, some of these extra replicas may be freed. If the
+ * requested value is too large, the deployment will error.
*
*
* int32 min_replica_count = 1 [(.google.api.field_behavior) = IMMUTABLE];
@@ -95,15 +96,15 @@ public int getMinReplicaCount() {
*
*
*
- * Immutable. The maximum number of replicas this DeployedModel may be deployed on when
- * the traffic against it increases. If the requested value is too large,
- * the deployment will error, but if deployment succeeds then the ability
- * to scale the model to that many replicas is guaranteed (barring service
- * outages). If traffic against the DeployedModel increases beyond what its
- * replicas at maximum may handle, a portion of the traffic will be dropped.
- * If this value is not provided, a no upper bound for scaling under heavy
- * traffic will be assume, though Vertex AI may be unable to scale beyond
- * certain replica number.
+ * Immutable. The maximum number of replicas this DeployedModel may be
+ * deployed on when the traffic against it increases. If the requested value
+ * is too large, the deployment will error, but if deployment succeeds then
+ * the ability to scale the model to that many replicas is guaranteed (barring
+ * service outages). If traffic against the DeployedModel increases beyond
+ * what its replicas at maximum may handle, a portion of the traffic will be
+ * dropped. If this value is not provided, no upper bound for scaling under
+ * heavy traffic will be assumed, though Vertex AI may be unable to scale
+ * beyond a certain replica number.
*
*
* int32 max_replica_count = 2 [(.google.api.field_behavior) = IMMUTABLE];
@@ -472,11 +473,12 @@ public Builder mergeFrom(
*
*
*
- * Immutable. The minimum number of replicas this DeployedModel will be always deployed
- * on. If traffic against it increases, it may dynamically be deployed onto
- * more replicas up to [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count], and as traffic decreases, some
- * of these extra replicas may be freed.
- * If the requested value is too large, the deployment will error.
+ * Immutable. The minimum number of replicas this DeployedModel will always be
+ * deployed on. If traffic against it increases, it may dynamically be
+ * deployed onto more replicas up to
+ * [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count],
+ * and as traffic decreases, some of these extra replicas may be freed. If the
+ * requested value is too large, the deployment will error.
*
*
* int32 min_replica_count = 1 [(.google.api.field_behavior) = IMMUTABLE];
@@ -491,11 +493,12 @@ public int getMinReplicaCount() {
*
*
*
- * Immutable. The minimum number of replicas this DeployedModel will be always deployed
- * on. If traffic against it increases, it may dynamically be deployed onto
- * more replicas up to [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count], and as traffic decreases, some
- * of these extra replicas may be freed.
- * If the requested value is too large, the deployment will error.
+ * Immutable. The minimum number of replicas this DeployedModel will always be
+ * deployed on. If traffic against it increases, it may dynamically be
+ * deployed onto more replicas up to
+ * [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count],
+ * and as traffic decreases, some of these extra replicas may be freed. If the
+ * requested value is too large, the deployment will error.
*
*
* int32 min_replica_count = 1 [(.google.api.field_behavior) = IMMUTABLE];
@@ -513,11 +516,12 @@ public Builder setMinReplicaCount(int value) {
*
*
*
- * Immutable. The minimum number of replicas this DeployedModel will be always deployed
- * on. If traffic against it increases, it may dynamically be deployed onto
- * more replicas up to [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count], and as traffic decreases, some
- * of these extra replicas may be freed.
- * If the requested value is too large, the deployment will error.
+ * Immutable. The minimum number of replicas this DeployedModel will always be
+ * deployed on. If traffic against it increases, it may dynamically be
+ * deployed onto more replicas up to
+ * [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count],
+ * and as traffic decreases, some of these extra replicas may be freed. If the
+ * requested value is too large, the deployment will error.
*
*
* int32 min_replica_count = 1 [(.google.api.field_behavior) = IMMUTABLE];
@@ -536,15 +540,15 @@ public Builder clearMinReplicaCount() {
*
*
*
- * Immutable. The maximum number of replicas this DeployedModel may be deployed on when
- * the traffic against it increases. If the requested value is too large,
- * the deployment will error, but if deployment succeeds then the ability
- * to scale the model to that many replicas is guaranteed (barring service
- * outages). If traffic against the DeployedModel increases beyond what its
- * replicas at maximum may handle, a portion of the traffic will be dropped.
- * If this value is not provided, a no upper bound for scaling under heavy
- * traffic will be assume, though Vertex AI may be unable to scale beyond
- * certain replica number.
+ * Immutable. The maximum number of replicas this DeployedModel may be
+ * deployed on when the traffic against it increases. If the requested value
+ * is too large, the deployment will error, but if deployment succeeds then
+ * the ability to scale the model to that many replicas is guaranteed (barring
+ * service outages). If traffic against the DeployedModel increases beyond
+ * what its replicas at maximum may handle, a portion of the traffic will be
+ * dropped. If this value is not provided, no upper bound for scaling under
+ * heavy traffic will be assumed, though Vertex AI may be unable to scale
+ * beyond a certain replica number.
*
*
* int32 max_replica_count = 2 [(.google.api.field_behavior) = IMMUTABLE];
@@ -559,15 +563,15 @@ public int getMaxReplicaCount() {
*
*
*
- * Immutable. The maximum number of replicas this DeployedModel may be deployed on when
- * the traffic against it increases. If the requested value is too large,
- * the deployment will error, but if deployment succeeds then the ability
- * to scale the model to that many replicas is guaranteed (barring service
- * outages). If traffic against the DeployedModel increases beyond what its
- * replicas at maximum may handle, a portion of the traffic will be dropped.
- * If this value is not provided, a no upper bound for scaling under heavy
- * traffic will be assume, though Vertex AI may be unable to scale beyond
- * certain replica number.
+ * Immutable. The maximum number of replicas this DeployedModel may be
+ * deployed on when the traffic against it increases. If the requested value
+ * is too large, the deployment will error, but if deployment succeeds then
+ * the ability to scale the model to that many replicas is guaranteed (barring
+ * service outages). If traffic against the DeployedModel increases beyond
+ * what its replicas at maximum may handle, a portion of the traffic will be
+ * dropped. If this value is not provided, no upper bound for scaling under
+ * heavy traffic will be assumed, though Vertex AI may be unable to scale
+ * beyond a certain replica number.
*
*
* int32 max_replica_count = 2 [(.google.api.field_behavior) = IMMUTABLE];
@@ -585,15 +589,15 @@ public Builder setMaxReplicaCount(int value) {
*
*
*
- * Immutable. The maximum number of replicas this DeployedModel may be deployed on when
- * the traffic against it increases. If the requested value is too large,
- * the deployment will error, but if deployment succeeds then the ability
- * to scale the model to that many replicas is guaranteed (barring service
- * outages). If traffic against the DeployedModel increases beyond what its
- * replicas at maximum may handle, a portion of the traffic will be dropped.
- * If this value is not provided, a no upper bound for scaling under heavy
- * traffic will be assume, though Vertex AI may be unable to scale beyond
- * certain replica number.
+ * Immutable. The maximum number of replicas this DeployedModel may be
+ * deployed on when the traffic against it increases. If the requested value
+ * is too large, the deployment will error, but if deployment succeeds then
+ * the ability to scale the model to that many replicas is guaranteed (barring
+ * service outages). If traffic against the DeployedModel increases beyond
+ * what its replicas at maximum may handle, a portion of the traffic will be
+ * dropped. If this value is not provided, no upper bound for scaling under
+ * heavy traffic will be assumed, though Vertex AI may be unable to scale
+ * beyond a certain replica number.
*
*
* int32 max_replica_count = 2 [(.google.api.field_behavior) = IMMUTABLE];
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AutomaticResourcesOrBuilder.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AutomaticResourcesOrBuilder.java
index 16049c561dce..47e34af849c3 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AutomaticResourcesOrBuilder.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/AutomaticResourcesOrBuilder.java
@@ -27,11 +27,12 @@ public interface AutomaticResourcesOrBuilder
*
*
*
- * Immutable. The minimum number of replicas this DeployedModel will be always deployed
- * on. If traffic against it increases, it may dynamically be deployed onto
- * more replicas up to [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count], and as traffic decreases, some
- * of these extra replicas may be freed.
- * If the requested value is too large, the deployment will error.
+ * Immutable. The minimum number of replicas this DeployedModel will be always
+ * deployed on. If traffic against it increases, it may dynamically be
+ * deployed onto more replicas up to
+ * [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count],
+ * and as traffic decreases, some of these extra replicas may be freed. If the
+ * requested value is too large, the deployment will error.
*
*
* int32 min_replica_count = 1 [(.google.api.field_behavior) = IMMUTABLE];
@@ -44,15 +45,15 @@ public interface AutomaticResourcesOrBuilder
*
*
*
- * Immutable. The maximum number of replicas this DeployedModel may be deployed on when
- * the traffic against it increases. If the requested value is too large,
- * the deployment will error, but if deployment succeeds then the ability
- * to scale the model to that many replicas is guaranteed (barring service
- * outages). If traffic against the DeployedModel increases beyond what its
- * replicas at maximum may handle, a portion of the traffic will be dropped.
- * If this value is not provided, a no upper bound for scaling under heavy
- * traffic will be assume, though Vertex AI may be unable to scale beyond
- * certain replica number.
+ * Immutable. The maximum number of replicas this DeployedModel may be
+ * deployed on when the traffic against it increases. If the requested value
+ * is too large, the deployment will error, but if deployment succeeds then
+ * the ability to scale the model to that many replicas is guaranteed (barring
+ * service outages). If traffic against the DeployedModel increases beyond
+ * what its replicas at maximum may handle, a portion of the traffic will be
+ * dropped. If this value is not provided, no upper bound for scaling under
+ * heavy traffic will be assumed, though Vertex AI may be unable to scale
+ * beyond a certain replica number.
*
*
* int32 max_replica_count = 2 [(.google.api.field_behavior) = IMMUTABLE];
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesRequest.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesRequest.java
index eddb0d44895c..ed615f975d40 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesRequest.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesRequest.java
@@ -22,7 +22,8 @@
*
*
*
- * Request message for [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
+ * Request message for
+ * [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.BatchCreateFeaturesRequest}
@@ -74,8 +75,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
*
*
*
- * Required. The resource name of the EntityType to create the batch of Features under.
- * Format:
+ * Required. The resource name of the EntityType to create the batch of
+ * Features under. Format:
* `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
*
*
@@ -101,8 +102,8 @@ public java.lang.String getParent() {
*
*
*
- * Required. The resource name of the EntityType to create the batch of Features under.
- * Format:
+ * Required. The resource name of the EntityType to create the batch of
+ * Features under. Format:
* `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
*
*
@@ -131,10 +132,11 @@ public com.google.protobuf.ByteString getParentBytes() {
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -149,10 +151,11 @@ public java.util.List
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -168,10 +171,11 @@ public java.util.List
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -186,10 +190,11 @@ public int getRequestsCount() {
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -204,10 +209,11 @@ public com.google.cloud.aiplatform.v1.CreateFeatureRequest getRequests(int index
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -395,7 +401,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Request message for [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
+ * Request message for
+ * [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.BatchCreateFeaturesRequest}
@@ -627,8 +634,8 @@ public Builder mergeFrom(
*
*
*
- * Required. The resource name of the EntityType to create the batch of Features under.
- * Format:
+ * Required. The resource name of the EntityType to create the batch of
+ * Features under. Format:
* `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
*
*
@@ -653,8 +660,8 @@ public java.lang.String getParent() {
*
*
*
- * Required. The resource name of the EntityType to create the batch of Features under.
- * Format:
+ * Required. The resource name of the EntityType to create the batch of
+ * Features under. Format:
* `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
*
*
@@ -679,8 +686,8 @@ public com.google.protobuf.ByteString getParentBytes() {
*
*
*
- * Required. The resource name of the EntityType to create the batch of Features under.
- * Format:
+ * Required. The resource name of the EntityType to create the batch of
+ * Features under. Format:
* `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
*
*
@@ -704,8 +711,8 @@ public Builder setParent(java.lang.String value) {
*
*
*
- * Required. The resource name of the EntityType to create the batch of Features under.
- * Format:
+ * Required. The resource name of the EntityType to create the batch of
+ * Features under. Format:
* `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
*
*
@@ -725,8 +732,8 @@ public Builder clearParent() {
*
*
*
- * Required. The resource name of the EntityType to create the batch of Features under.
- * Format:
+ * Required. The resource name of the EntityType to create the batch of
+ * Features under. Format:
* `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
*
*
@@ -769,10 +776,11 @@ private void ensureRequestsIsMutable() {
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -790,10 +798,11 @@ public java.util.List
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -811,10 +820,11 @@ public int getRequestsCount() {
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -832,10 +842,11 @@ public com.google.cloud.aiplatform.v1.CreateFeatureRequest getRequests(int index
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -860,10 +871,11 @@ public Builder setRequests(
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -885,10 +897,11 @@ public Builder setRequests(
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -912,10 +925,11 @@ public Builder addRequests(com.google.cloud.aiplatform.v1.CreateFeatureRequest v
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -940,10 +954,11 @@ public Builder addRequests(
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -965,10 +980,11 @@ public Builder addRequests(
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -990,10 +1006,11 @@ public Builder addRequests(
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -1015,10 +1032,11 @@ public Builder addAllRequests(
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -1039,10 +1057,11 @@ public Builder clearRequests() {
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -1063,10 +1082,11 @@ public Builder removeRequests(int index) {
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -1081,10 +1101,11 @@ public com.google.cloud.aiplatform.v1.CreateFeatureRequest.Builder getRequestsBu
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -1103,10 +1124,11 @@ public com.google.cloud.aiplatform.v1.CreateFeatureRequestOrBuilder getRequestsO
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -1125,10 +1147,11 @@ public com.google.cloud.aiplatform.v1.CreateFeatureRequestOrBuilder getRequestsO
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -1143,10 +1166,11 @@ public com.google.cloud.aiplatform.v1.CreateFeatureRequest.Builder addRequestsBu
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -1163,10 +1187,11 @@ public com.google.cloud.aiplatform.v1.CreateFeatureRequest.Builder addRequestsBu
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesRequestOrBuilder.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesRequestOrBuilder.java
index c453e6481a85..8dab54ba67da 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesRequestOrBuilder.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesRequestOrBuilder.java
@@ -27,8 +27,8 @@ public interface BatchCreateFeaturesRequestOrBuilder
*
*
*
- * Required. The resource name of the EntityType to create the batch of Features under.
- * Format:
+ * Required. The resource name of the EntityType to create the batch of
+ * Features under. Format:
* `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
*
*
@@ -43,8 +43,8 @@ public interface BatchCreateFeaturesRequestOrBuilder
*
*
*
- * Required. The resource name of the EntityType to create the batch of Features under.
- * Format:
+ * Required. The resource name of the EntityType to create the batch of
+ * Features under. Format:
* `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`
*
*
@@ -60,10 +60,11 @@ public interface BatchCreateFeaturesRequestOrBuilder
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -75,10 +76,11 @@ public interface BatchCreateFeaturesRequestOrBuilder
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -90,10 +92,11 @@ public interface BatchCreateFeaturesRequestOrBuilder
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -105,10 +108,11 @@ public interface BatchCreateFeaturesRequestOrBuilder
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
@@ -121,10 +125,11 @@ public interface BatchCreateFeaturesRequestOrBuilder
*
*
*
- * Required. The request message specifying the Features to create. All Features must be
- * created under the same parent EntityType. The `parent` field in each child
- * request message can be omitted. If `parent` is set in a child request, then
- * the value must match the `parent` value in this request message.
+ * Required. The request message specifying the Features to create. All
+ * Features must be created under the same parent EntityType. The `parent`
+ * field in each child request message can be omitted. If `parent` is set in a
+ * child request, then the value must match the `parent` value in this request
+ * message.
*
*
*
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesResponse.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesResponse.java
index d5d00a0dff99..29052c3a1a39 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesResponse.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateFeaturesResponse.java
@@ -22,7 +22,8 @@
*
*
*
- * Response message for [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
+ * Response message for
+ * [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.BatchCreateFeaturesResponse}
@@ -302,7 +303,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Response message for [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
+ * Response message for
+ * [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.BatchCreateFeaturesResponse}
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardRunsRequest.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardRunsRequest.java
index 02f5a2a8d071..96089ffd5da8 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardRunsRequest.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardRunsRequest.java
@@ -22,7 +22,8 @@
*
*
*
- * Request message for [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns].
+ * Request message for
+ * [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.BatchCreateTensorboardRunsRequest}
@@ -392,7 +393,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Request message for [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns].
+ * Request message for
+ * [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.BatchCreateTensorboardRunsRequest}
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardRunsResponse.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardRunsResponse.java
index 27b72142098d..e974908fdd93 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardRunsResponse.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardRunsResponse.java
@@ -22,7 +22,8 @@
*
*
*
- * Response message for [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns].
+ * Response message for
+ * [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.BatchCreateTensorboardRunsResponse}
@@ -305,7 +306,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Response message for [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns].
+ * Response message for
+ * [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.BatchCreateTensorboardRunsResponse}
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardTimeSeriesRequest.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardTimeSeriesRequest.java
index 511d67683c32..048aa6df0b25 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardTimeSeriesRequest.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardTimeSeriesRequest.java
@@ -22,7 +22,8 @@
*
*
*
- * Request message for [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries].
+ * Request message for
+ * [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.BatchCreateTensorboardTimeSeriesRequest}
@@ -142,8 +143,8 @@ public com.google.protobuf.ByteString getParentBytes() {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -159,8 +160,8 @@ public com.google.protobuf.ByteString getParentBytes() {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -177,8 +178,8 @@ public com.google.protobuf.ByteString getParentBytes() {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -193,8 +194,8 @@ public int getRequestsCount() {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -209,8 +210,8 @@ public com.google.cloud.aiplatform.v1.CreateTensorboardTimeSeriesRequest getRequ
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -399,7 +400,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
*
*
*
- * Request message for [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries].
+ * Request message for
+ * [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries].
*
*
* Protobuf type {@code google.cloud.aiplatform.v1.BatchCreateTensorboardTimeSeriesRequest}
@@ -801,8 +803,8 @@ private void ensureRequestsIsMutable() {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -821,8 +823,8 @@ private void ensureRequestsIsMutable() {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -840,8 +842,8 @@ public int getRequestsCount() {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -860,8 +862,8 @@ public com.google.cloud.aiplatform.v1.CreateTensorboardTimeSeriesRequest getRequ
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -886,8 +888,8 @@ public Builder setRequests(
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -910,8 +912,8 @@ public Builder setRequests(
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -936,8 +938,8 @@ public Builder addRequests(
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -962,8 +964,8 @@ public Builder addRequests(
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -985,8 +987,8 @@ public Builder addRequests(
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -1009,8 +1011,8 @@ public Builder addRequests(
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -1034,8 +1036,8 @@ public Builder addAllRequests(
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -1056,8 +1058,8 @@ public Builder clearRequests() {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -1078,8 +1080,8 @@ public Builder removeRequests(int index) {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -1094,8 +1096,8 @@ public Builder removeRequests(int index) {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -1114,8 +1116,8 @@ public Builder removeRequests(int index) {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -1135,8 +1137,8 @@ public Builder removeRequests(int index) {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -1154,8 +1156,8 @@ public Builder removeRequests(int index) {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -1174,8 +1176,8 @@ public Builder removeRequests(int index) {
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
diff --git a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardTimeSeriesRequestOrBuilder.java b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardTimeSeriesRequestOrBuilder.java
index 827719082a08..34e74e7167ed 100644
--- a/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardTimeSeriesRequestOrBuilder.java
+++ b/java-aiplatform/proto-google-cloud-aiplatform-v1/src/main/java/com/google/cloud/aiplatform/v1/BatchCreateTensorboardTimeSeriesRequestOrBuilder.java
@@ -68,8 +68,8 @@ public interface BatchCreateTensorboardTimeSeriesRequestOrBuilder
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -82,8 +82,8 @@ public interface BatchCreateTensorboardTimeSeriesRequestOrBuilder
*
*
*
- * Required. The request message specifying the TensorboardTimeSeries to create.
- * A maximum of 1000 TensorboardTimeSeries can be created in a batch.
+ * Required. The request message specifying the TensorboardTimeSeries to
+ * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch.
*
*
*
@@ -95,8 +95,8 @@ public interface BatchCreateTensorboardTimeSeriesRequestOrBuilder
*
*
*