feat(all): auto-regenerate discovery clients (#2211)
yoshi-automation committed Oct 11, 2023
1 parent b3a71bd commit df0b730
Showing 22 changed files with 1,730 additions and 711 deletions.
29 changes: 5 additions & 24 deletions aiplatform/v1/aiplatform-api.json
@@ -12997,7 +12997,7 @@
}
}
},
"revision": "20230929",
"revision": "20231002",
"rootUrl": "https://aiplatform.googleapis.com/",
"schemas": {
"GoogleApiHttpBody": {
@@ -16466,10 +16466,6 @@
"description": "An expression for filtering what part of the Dataset is to be exported. Only Annotations that match this filter will be exported. The filter syntax is the same as in ListAnnotations.",
"type": "string"
},
"filterSplit": {
"$ref": "GoogleCloudAiplatformV1ExportFilterSplit",
"description": "Split based on the provided filters for each set."
},
"fractionSplit": {
"$ref": "GoogleCloudAiplatformV1ExportFractionSplit",
"description": "Split based on fractions defining the size of each set."
@@ -16602,25 +16598,6 @@
"properties": {},
"type": "object"
},
"GoogleCloudAiplatformV1ExportFilterSplit": {
"description": "Assigns input data to training, validation, and test sets based on the given filters, data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. If any of the filters in this message are to match nothing, then they can be set as '-' (the minus sign). Supported only for unstructured Datasets.",
"id": "GoogleCloudAiplatformV1ExportFilterSplit",
"properties": {
"testFilter": {
"description": "Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order.",
"type": "string"
},
"trainingFilter": {
"description": "Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to train the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order.",
"type": "string"
},
"validationFilter": {
"description": "Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudAiplatformV1ExportFractionSplit": {
"description": "Assigns the input data to training, validation, and test sets as per the given fractions. Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test.",
"id": "GoogleCloudAiplatformV1ExportFractionSplit",
@@ -18934,6 +18911,10 @@
"machineType": {
"description": "Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required.",
"type": "string"
+ },
+ "tpuTopology": {
+ "description": "Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: \"2x2x1\").",
+ "type": "string"
}
},
"type": "object"
63 changes: 4 additions & 59 deletions aiplatform/v1/aiplatform-gen.go

Some generated files are not rendered by default.
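Since the regenerated aiplatform-gen.go diff is not rendered, below is a minimal sketch of how the new tpuTopology property from the v1 discovery document above would likely surface in the generated Go client. The struct layout, field names, and the example machine type are assumptions based on the generator's usual conventions, not the actual generated code.

// machinespec_sketch.go: hedged sketch of the regenerated MachineSpec shape.
package main

import (
	"encoding/json"
	"fmt"
)

// MachineSpec mirrors the discovery schema above; names and JSON tags are
// assumed to follow the discovery generator's usual mapping.
type MachineSpec struct {
	// MachineType: Immutable. Optional for DeployedModel (defaults to
	// n1-standard-2), required for BatchPredictionJob and WorkerPoolSpec.
	MachineType string `json:"machineType,omitempty"`

	// TpuTopology: Immutable. The topology of the TPUs, matching the TPU
	// topologies available from GKE, e.g. "2x2x1".
	TpuTopology string `json:"tpuTopology,omitempty"`
}

func main() {
	spec := MachineSpec{
		MachineType: "ct5lp-hightpu-4t", // hypothetical TPU machine type
		TpuTopology: "2x2x1",
	}
	b, _ := json.Marshal(spec)
	// Prints: {"machineType":"ct5lp-hightpu-4t","tpuTopology":"2x2x1"}
	fmt.Println(string(b))
}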

33 changes: 7 additions & 26 deletions aiplatform/v1beta1/aiplatform-api.json
@@ -16092,7 +16092,7 @@
}
}
},
"revision": "20230929",
"revision": "20231002",
"rootUrl": "https://aiplatform.googleapis.com/",
"schemas": {
"GoogleApiHttpBody": {
@@ -19770,10 +19770,6 @@
"description": "An expression for filtering what part of the Dataset is to be exported. Only Annotations that match this filter will be exported. The filter syntax is the same as in ListAnnotations.",
"type": "string"
},
"filterSplit": {
"$ref": "GoogleCloudAiplatformV1beta1ExportFilterSplit",
"description": "Split based on the provided filters for each set."
},
"fractionSplit": {
"$ref": "GoogleCloudAiplatformV1beta1ExportFractionSplit",
"description": "Split based on fractions defining the size of each set."
@@ -19939,25 +19935,6 @@
"properties": {},
"type": "object"
},
"GoogleCloudAiplatformV1beta1ExportFilterSplit": {
"description": "Assigns input data to training, validation, and test sets based on the given filters, data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. If any of the filters in this message are to match nothing, then they can be set as '-' (the minus sign). Supported only for unstructured Datasets.",
"id": "GoogleCloudAiplatformV1beta1ExportFilterSplit",
"properties": {
"testFilter": {
"description": "Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order.",
"type": "string"
},
"trainingFilter": {
"description": "Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to train the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order.",
"type": "string"
},
"validationFilter": {
"description": "Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order.",
"type": "string"
}
},
"type": "object"
},
"GoogleCloudAiplatformV1beta1ExportFractionSplit": {
"description": "Assigns the input data to training, validation, and test sets as per the given fractions. Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test.",
"id": "GoogleCloudAiplatformV1beta1ExportFractionSplit",
@@ -20188,7 +20165,7 @@
"properties": {
"bigQuery": {
"$ref": "GoogleCloudAiplatformV1beta1FeatureGroupBigQuery",
"description": "Indicates that features for this group come from BigQuery."
"description": "Indicates that features for this group come from BigQuery Table/View. By default treats the source as a sparse time series source, which is required to have an entity_id and a feature_timestamp column in the source."
},
"createTime": {
"description": "Output only. Timestamp when this FeatureGroup was created.",
@@ -20624,7 +20601,7 @@
"type": "array"
},
"uri": {
"description": "Required. The Bigquery View URI that will be materialized on each sync trigger based on FeatureView.SyncConfig.",
"description": "Required. The BigQuery view URI that will be materialized on each sync trigger based on FeatureView.SyncConfig.",
"type": "string"
}
},
@@ -22931,6 +22908,10 @@
"machineType": {
"description": "Immutable. The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required.",
"type": "string"
+ },
+ "tpuTopology": {
+ "description": "Immutable. The topology of the TPUs. Corresponds to the TPU topologies available from GKE. (Example: tpu_topology: \"2x2x1\").",
+ "type": "string"
}
},
"type": "object"
70 changes: 9 additions & 61 deletions aiplatform/v1beta1/aiplatform-gen.go

Some generated files are not rendered by default.
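Both diffs also drop the filterSplit field and the ExportFilterSplit schema from ExportDataConfig, leaving fractionSplit as the remaining split mechanism. Below is a minimal sketch of that surviving v1beta1 shape; the Go type and field names are inferred from the discovery descriptions above (training_fraction, validation_fraction, test_fraction mapped to camelCase JSON), not copied from the actual regenerated code.

// exportdataconfig_sketch.go: hedged sketch of ExportDataConfig after this change.
package main

import (
	"encoding/json"
	"fmt"
)

// ExportFractionSplit mirrors GoogleCloudAiplatformV1beta1ExportFractionSplit:
// the three fractions may be partially set and must sum to at most 1.
type ExportFractionSplit struct {
	TrainingFraction   float64 `json:"trainingFraction,omitempty"`
	ValidationFraction float64 `json:"validationFraction,omitempty"`
	TestFraction       float64 `json:"testFraction,omitempty"`
}

// ExportDataConfig keeps annotationsFilter and fractionSplit; the filterSplit
// field removed in this commit is gone.
type ExportDataConfig struct {
	AnnotationsFilter string               `json:"annotationsFilter,omitempty"`
	FractionSplit     *ExportFractionSplit `json:"fractionSplit,omitempty"`
}

func main() {
	cfg := ExportDataConfig{
		AnnotationsFilter: "labels.color=red", // hypothetical ListAnnotations-style filter
		FractionSplit: &ExportFractionSplit{
			TrainingFraction:   0.8,
			ValidationFraction: 0.1,
			TestFraction:       0.1,
		},
	}
	b, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(b)) // prints the config as discovery-style JSON
}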
